input | output
---|---
"""Prompt class."""
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.prompts.base import (
BasePromptTemplate,
ChatPromptTemplate,
LangchainPromptTemplate,
Prompt,
PromptTemplate,
PromptType,
SelectorPromptTemplate,
)
from llama_index.core.prompts.rich import RichPromptTemplate
from llama_index.core.prompts.display_utils import display_prompt_dict
__all__ = [
"Prompt",
"PromptTemplate",
"SelectorPromptTemplate",
"ChatPromptTemplate",
"LangchainPromptTemplate",
"BasePromptTemplate",
"PromptType",
"ChatMessage",
"MessageRole",
"display_prompt_dict",
"RichPromptTemplate",
]
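# Hedged usage sketch (not part of the module): PromptTemplate is the most
# common entry point re-exported above, and format() fills the template
# variables. Assuming llama_index is installed:
#
#   from llama_index.core.prompts import PromptTemplate
#   qa_template = PromptTemplate("Answer the question: {query_str}")
#   prompt_str = qa_template.format(query_str="What is a vector index?")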
|
"""Prompt class."""
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.prompts.base import (
BasePromptTemplate,
ChatPromptTemplate,
LangchainPromptTemplate,
Prompt,
PromptTemplate,
PromptType,
SelectorPromptTemplate,
)
from llama_index.core.prompts.display_utils import display_prompt_dict
__all__ = [
"Prompt",
"PromptTemplate",
"SelectorPromptTemplate",
"ChatPromptTemplate",
"LangchainPromptTemplate",
"BasePromptTemplate",
"PromptType",
"ChatMessage",
"MessageRole",
"display_prompt_dict",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
import numpy as np
import torch
from mmdet.registry import DATASETS, TRANSFORMS
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
PIPELINES = TRANSFORMS
def _concat_dataset(cfg, default_args=None):
from .dataset_wrappers import ConcatDataset
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
separate_eval = cfg.get('separate_eval', True)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
# pop 'separate_eval' since it is not a valid key for common datasets.
if 'separate_eval' in data_cfg:
data_cfg.pop('separate_eval')
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval)
# TODO: Need to refactor later
def build_dataset(cfg, default_args=None):
from mmengine.dataset import ClassBalancedDataset
from .dataset_wrappers import MultiImageMixDataset
if cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif cfg['type'] == 'MultiImageMixDataset':
cp_cfg = copy.deepcopy(cfg)
cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
cp_cfg.pop('type')
dataset = MultiImageMixDataset(**cp_cfg)
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = DATASETS.build(cfg, default_args=default_args)
return dataset
def worker_init_fn(worker_id, num_workers, rank, seed):
# The seed of each worker equals
# num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
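# Hedged usage sketch (illustration only): with num_workers=4, rank=1 and
# seed=42, worker 2 receives seed 4 * 1 + 2 + 42 = 48, so every dataloader
# worker on every rank is seeded differently yet deterministically:
#
#   worker_init_fn(worker_id=2, num_workers=4, rank=1, seed=42)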
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
import numpy as np
import torch
from mmdet.registry import DATASETS, TRANSFORMS
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
PIPELINES = TRANSFORMS
def _concat_dataset(cfg, default_args=None):
from .dataset_wrappers import ConcatDataset
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
separate_eval = cfg.get('separate_eval', True)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
# pop 'separate_eval' since it is not a valid key for common datasets.
if 'separate_eval' in data_cfg:
data_cfg.pop('separate_eval')
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval)
# TODO: Need to refactor later
def build_dataset(cfg, default_args=None):
from .dataset_wrappers import ClassBalancedDataset, MultiImageMixDataset
if cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif cfg['type'] == 'MultiImageMixDataset':
cp_cfg = copy.deepcopy(cfg)
cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
cp_cfg.pop('type')
dataset = MultiImageMixDataset(**cp_cfg)
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = DATASETS.build(cfg, default_args=default_args)
return dataset
def worker_init_fn(worker_id, num_workers, rank, seed):
# The seed of each worker equals
# num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
|
__version__ = '0.13.4'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
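# Hedged note (illustration only): the check above tests key presence only,
# so any value disables the rich traceback handler; set it before importing
# docarray:
#
#   os.environ['DA_NO_RICH_HANDLER'] = '1'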
|
__version__ = '0.13.3'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .RegularizerLoss import FlopsLoss, IDFFlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"IDFFlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
"""Tests related to the `DataIter` interface."""
import numpy as np
import xgboost
from xgboost import testing as tm
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, None, on_host=False)
Xy_0 = xgboost.QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = xgboost.QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
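# Hedged usage sketch (illustration only): exercise both code paths.
#
#   run_mixed_sparsity("cpu")
#   run_mixed_sparsity("cuda:0")  # needs cupy and a CUDA-enabled xgboost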
|
"""Tests related to the `DataIter` interface."""
import numpy as np
import xgboost
from xgboost import testing as tm
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, None)
Xy_0 = xgboost.QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = xgboost.QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
|
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
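# Hedged note (illustration only): with MMEngine config inheritance this
# dict is merged into the optim_wrapper from _base_, so the base optimizer
# settings are kept and only the wrapper type switches to AMP.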
|
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py'
# you need to set mode='dynamic' if you are using pytorch<=1.5.0
fp16 = dict(loss_scale=dict(init_scale=512))
|
from typing import Any
from collections import deque
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.llms import LLMMetadata, CompletionResponse, CompletionResponseGen
from llama_index.core.prompts.base import PromptTemplate
from llama_index.agent.introspective.reflective.self_reflection import (
Reflection,
Correction,
SelfReflectionAgentWorker,
)
PRINT_CHAT_HISTORY = False
mock_reflections_queue = deque(
[
Reflection(is_done=False, feedback="This is a mock reflection."),
Reflection(is_done=True, feedback="This is a mock reflection."),
]
)
mock_correction = Correction(correction="This is a mock correction.")
class MockLLM(CustomLLM):
@property
def metadata(self) -> LLMMetadata:
"""
LLM metadata.
Returns:
LLMMetadata: LLM metadata containing various information about the LLM.
"""
return LLMMetadata()
def structured_predict(
self, output_cls: BaseModel, prompt: PromptTemplate, **prompt_args: Any
) -> BaseModel:
"""This is fixed so that it goes through 2 Reflections and 1 Correction."""
if output_cls == Reflection:
return mock_reflections_queue.popleft()
elif output_cls == Correction:
return mock_correction
else:
raise ValueError("Unexpected output_cls type for this test.")
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise NotImplementedError
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError
def test_reflection_agent() -> None:
# Arrange
dummy_llm = MockLLM()
worker = SelfReflectionAgentWorker.from_defaults(llm=dummy_llm)
# messages that would be sent from the introspective_agent when it delegates
# to reflection task
messages = [
ChatMessage(content="You are a helpful assistant.", role=MessageRole.SYSTEM),
ChatMessage(content="What's 2+2?", role=MessageRole.USER),
ChatMessage(content="I think it's 5.", role=MessageRole.ASSISTANT),
]
agent = worker.as_agent(chat_history=messages)
# Act
response = agent.chat("I think it's 5.") # reflect on current response
# Assert
if PRINT_CHAT_HISTORY:
for msg in agent.chat_history:
print(str(msg))
print()
assert response.response == "This is a mock correction."
assert (
len(agent.chat_history) == 8
) # (system, user, asst, user, ref, cor, ref, asst)
|
from typing import Any
from collections import deque
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.llms import LLMMetadata, CompletionResponse, CompletionResponseGen
from llama_index.core.prompts.base import PromptTemplate
from llama_index.agent.introspective.reflective.self_reflection import (
Reflection,
Correction,
SelfReflectionAgentWorker,
)
PRINT_CHAT_HISTORY = False
mock_reflections_queue = deque(
[
Reflection(is_done=False, feedback="This is a mock reflection."),
Reflection(is_done=True, feedback="This is a mock reflection."),
]
)
mock_correction = Correction(correction="This is a mock correction.")
class MockLLM(CustomLLM):
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata.
Returns:
LLMMetadata: LLM metadata containing various information about the LLM.
"""
return LLMMetadata()
def structured_predict(
self, output_cls: BaseModel, prompt: PromptTemplate, **prompt_args: Any
) -> BaseModel:
"""This is fixed so that it goes through 2 Reflections and 1 Correction."""
if output_cls == Reflection:
return mock_reflections_queue.popleft()
elif output_cls == Correction:
return mock_correction
else:
raise ValueError("Unexpected output_cls type for this test.")
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise NotImplementedError
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError
def test_reflection_agent() -> None:
# Arrange
dummy_llm = MockLLM()
worker = SelfReflectionAgentWorker.from_defaults(llm=dummy_llm)
# messages that would be sent from the introspective_agent when it delegates
# to reflection task
messages = [
ChatMessage(content="You are a helpful assistant.", role=MessageRole.SYSTEM),
ChatMessage(content="What's 2+2?", role=MessageRole.USER),
ChatMessage(content="I think it's 5.", role=MessageRole.ASSISTANT),
]
agent = worker.as_agent(chat_history=messages)
# Act
response = agent.chat("I think it's 5.") # reflect on current response
# Assert
if PRINT_CHAT_HISTORY:
for msg in agent.chat_history:
print(str(msg))
print()
assert response.response == "This is a mock correction."
assert (
len(agent.chat_history) == 8
) # (system, user, asst, user, ref, cor, ref, asst)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import (detect_anomalous_params, merge_dict, revert_sync_batchnorm,
stack_batch)
from .weight_init import (BaseInit, Caffe2XavierInit, ConstantInit,
KaimingInit, NormalInit, PretrainedInit,
TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init,
constant_init, initialize, kaiming_init, normal_init,
trunc_normal_init, uniform_init, update_init_info,
xavier_init)
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential', 'revert_sync_batchnorm', 'update_init_info',
'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
'bias_init_with_prob', 'BaseInit', 'ConstantInit', 'XavierInit',
'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit',
'Caffe2XavierInit', 'PretrainedInit', 'initialize'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.parrots_wrapper import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import detect_anomalous_params, merge_dict, stack_batch
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
import types
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.video.video_tensorflow_tensor import ( # noqa
VideoTensorFlowTensor,
)
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__ = ['VideoNdArray', 'VideoTensor']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'VideoTorchTensor':
import_library('torch', raise_error=True)
import docarray.typing.tensor.video.video_torch_tensor as lib
elif name == 'VideoTensorFlowTensor':
import_library('tensorflow', raise_error=True)
import docarray.typing.tensor.video.video_tensorflow_tensor as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
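# Hedged usage sketch (illustration only): the module-level __getattr__
# above makes the framework-specific tensors lazy imports, so torch or
# tensorflow is only pulled in when the name is first accessed:
#
#   from docarray.typing.tensor.video import VideoTorchTensor  # imports torch here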
|
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
__all__ = ['VideoNdArray']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(['VideoTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.video.video_tensorflow_tensor import ( # noqa: F401
VideoTensorFlowTensor,
)
__all__.extend(['VideoTensorFlowTensor'])
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Masking")
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer. You want to mask timesteps #3 and #5 because you
lack data for these timesteps. You can:
- Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
samples, timesteps, features = 32, 10, 8
inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
inputs[:, 3, :] = 0.
inputs[:, 5, :] = 0.
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0.0))
model.add(keras.layers.LSTM(32))
output = model(inputs)
# Timesteps 3 and 5 will be skipped in the LSTM calculation.
```
Note: in the Keras masking convention, a masked timestep is denoted by
a mask value of `False`, while a non-masked (i.e. usable) timestep
is denoted by a mask value of `True`.
"""
def __init__(self, mask_value=0.0, **kwargs):
super().__init__(**kwargs)
self.mask_value = mask_value
self.supports_masking = True
self.built = True
def compute_mask(self, inputs, mask=None):
return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = ops.any(
ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
)
# Set masked outputs to 0
outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype)
# Compute the mask and outputs simultaneously.
backend.set_keras_mask(outputs, mask=ops.squeeze(boolean_mask, axis=-1))
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {"mask_value": self.mask_value}
return {**base_config, **config}
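# Hedged usage sketch (illustration only): the compute_mask contract is
# that a timestep is kept (True) iff any feature at that step differs
# from mask_value:
#
#   import numpy as np
#   x = np.zeros((1, 4, 2), dtype="float32")
#   x[0, 1, :] = 1.0
#   print(Masking(mask_value=0.0).compute_mask(x))
#   # -> [[False  True False False]]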
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Masking")
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer. You want to mask timesteps #3 and #5 because you
lack data for these timesteps. You can:
- Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
samples, timesteps, features = 32, 10, 8
inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
inputs[:, 3, :] = 0.
inputs[:, 5, :] = 0.
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0.0))
model.add(keras.layers.LSTM(32))
output = model(inputs)
# Timesteps 3 and 5 will be skipped in the LSTM calculation.
```
Note: in the Keras masking convention, a masked timestep is denoted by
a mask value of `False`, while a non-masked (i.e. usable) timestep
is denoted by a mask value of `True`.
"""
def __init__(self, mask_value=0.0, **kwargs):
super().__init__(**kwargs)
self.mask_value = mask_value
self.supports_masking = True
self.built = True
def compute_mask(self, inputs, mask=None):
return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = ops.any(
ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
)
# Set masked outputs to 0
outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype)
# Compute the mask and outputs simultaneously.
backend.set_keras_mask(outputs, mask=ops.squeeze(boolean_mask, axis=-1))
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {"mask_value": self.mask_value}
return {**base_config, **config}
|
from typing import Any, Literal
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="unreal_speech",
api_key=SecretStr("mock-unreal-speech-api-key"),
title="Mock Unreal Speech API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class UnrealTextToSpeechBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="The text to be converted to speech",
placeholder="Enter the text you want to convert to speech",
)
voice_id: str = SchemaField(
description="The voice ID to use for text-to-speech conversion",
placeholder="Scarlett",
default="Scarlett",
)
credentials: CredentialsMetaInput[
Literal[ProviderName.UNREAL_SPEECH], Literal["api_key"]
] = CredentialsField(
description="The Unreal Speech integration can be used with "
"any API key with sufficient permissions for the blocks it is used on.",
)
class Output(BlockSchema):
mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
error: str = SchemaField(description="Error message if the API call failed")
def __init__(self):
super().__init__(
id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
description="Converts text to speech using the Unreal Speech API",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=UnrealTextToSpeechBlock.Input,
output_schema=UnrealTextToSpeechBlock.Output,
test_input={
"text": "This is a test of the text to speech API.",
"voice_id": "Scarlett",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[("mp3_url", "https://example.com/test.mp3")],
test_mock={
"call_unreal_speech_api": lambda *args, **kwargs: {
"OutputUri": "https://example.com/test.mp3"
}
},
test_credentials=TEST_CREDENTIALS,
)
@staticmethod
def call_unreal_speech_api(
api_key: SecretStr, text: str, voice_id: str
) -> dict[str, Any]:
url = "https://api.v7.unrealspeech.com/speech"
headers = {
"Authorization": f"Bearer {api_key.get_secret_value()}",
"Content-Type": "application/json",
}
data = {
"Text": text,
"VoiceId": voice_id,
"Bitrate": "192k",
"Speed": "0",
"Pitch": "1",
"TimestampType": "sentence",
}
response = requests.post(url, headers=headers, json=data)
return response.json()
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_response = self.call_unreal_speech_api(
credentials.api_key,
input_data.text,
input_data.voice_id,
)
yield "mp3_url", api_response["OutputUri"]
|
from typing import Any, Literal
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="unreal_speech",
api_key=SecretStr("mock-unreal-speech-api-key"),
title="Mock Unreal Speech API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class UnrealTextToSpeechBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="The text to be converted to speech",
placeholder="Enter the text you want to convert to speech",
)
voice_id: str = SchemaField(
description="The voice ID to use for text-to-speech conversion",
placeholder="Scarlett",
default="Scarlett",
)
credentials: CredentialsMetaInput[
Literal["unreal_speech"], Literal["api_key"]
] = CredentialsField(
provider="unreal_speech",
supported_credential_types={"api_key"},
description="The Unreal Speech integration can be used with "
"any API key with sufficient permissions for the blocks it is used on.",
)
class Output(BlockSchema):
mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
error: str = SchemaField(description="Error message if the API call failed")
def __init__(self):
super().__init__(
id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
description="Converts text to speech using the Unreal Speech API",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=UnrealTextToSpeechBlock.Input,
output_schema=UnrealTextToSpeechBlock.Output,
test_input={
"text": "This is a test of the text to speech API.",
"voice_id": "Scarlett",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[("mp3_url", "https://example.com/test.mp3")],
test_mock={
"call_unreal_speech_api": lambda *args, **kwargs: {
"OutputUri": "https://example.com/test.mp3"
}
},
test_credentials=TEST_CREDENTIALS,
)
@staticmethod
def call_unreal_speech_api(
api_key: SecretStr, text: str, voice_id: str
) -> dict[str, Any]:
url = "https://api.v7.unrealspeech.com/speech"
headers = {
"Authorization": f"Bearer {api_key.get_secret_value()}",
"Content-Type": "application/json",
}
data = {
"Text": text,
"VoiceId": voice_id,
"Bitrate": "192k",
"Speed": "0",
"Pitch": "1",
"TimestampType": "sentence",
}
response = requests.post(url, headers=headers, json=data)
return response.json()
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_response = self.call_unreal_speech_api(
credentials.api_key,
input_data.text,
input_data.voice_id,
)
yield "mp3_url", api_response["OutputUri"]
|
import asyncio
from typing import Any, AsyncGenerator, List, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future[RunResultT]):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self.ctx = ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if not self.ctx:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if type(ev) is StopEvent:
break
async def run_step(self) -> Optional[List[Event]]:
"""Runs the next workflow step and returns the output Event.
If return is None, then the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx is None:
raise ValueError("Context must be set to run a workflow step-wise!")
if not self.ctx.stepwise:
raise ValueError(
"Workflow must be created passing stepwise=True to call this method."
)
try:
# Reset the events collected in current step
self.ctx._step_events_holding = None
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
# Remove any reference to the tasks
for t in self.ctx._tasks:
t.cancel()
await asyncio.sleep(0)
# the context is no longer running
self.ctx.is_running = False
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx.get_holding_events()
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
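# Hedged usage sketch (illustration only; `MyWorkflow` is a hypothetical
# Workflow subclass): stream events while the run progresses, then await
# the final result.
#
#   handler = MyWorkflow(timeout=60).run()
#   async for ev in handler.stream_events():
#       print(type(ev).__name__)
#   result = await handler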
|
import asyncio
from typing import Any, AsyncGenerator, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future[RunResultT]):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self.ctx = ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if not self.ctx:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if type(ev) is StopEvent:
break
async def run_step(self) -> Optional[Event]:
"""Runs the next workflow step and returns the output Event.
If return is None, then the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx and not self.ctx.stepwise:
raise ValueError("Stepwise context is required to run stepwise.")
if self.ctx:
try:
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
# Remove any reference to the tasks
for t in self.ctx._tasks:
t.cancel()
await asyncio.sleep(0)
# the context is no longer running
self.ctx.is_running = False
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx._step_event_holding
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
else:
raise ValueError("Context is not set!")
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, FUNCTIONS, HOOKS,
INFERENCERS, LOG_PROCESSORS, LOOPS, METRICS, MODEL_WRAPPERS,
MODELS, OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
TASK_UTILS, TRANSFORMS, VISBACKENDS, VISUALIZERS,
WEIGHT_INITIALIZERS)
from .utils import (count_registered_modules, init_default_scope,
traverse_registry_tree)
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR', 'INFERENCERS',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg',
'build_scheduler_from_cfg', 'init_default_scope', 'FUNCTIONS'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, INFERENCERS,
LOG_PROCESSORS, LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import (count_registered_modules, init_default_scope,
traverse_registry_tree)
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR', 'INFERENCERS',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg',
'build_scheduler_from_cfg', 'init_default_scope'
]
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the MSE loss between the computed query-passage score and a target query-passage score. This loss
is used to distill a cross-encoder model from a teacher cross-encoder model or gold labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.MSELoss`.
.. note::
Be mindful of the magnitude of both the labels and what the model produces. If the teacher model applies a
Sigmoid to bound its logits to [0, 1], then you may wish to use a Sigmoid activation function in this loss.
References:
- Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation: https://arxiv.org/abs/2010.02666
Requirements:
1. Your model must be initialized with `num_labels = 1` (a.k.a. the default) to predict one class.
2. Usually uses a finetuned CrossEncoder teacher M in a knowledge distillation setup.
Inputs:
+-----------------------------------------+-----------------------------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=========================================+=============================+===============================+
| (sentence_A, sentence_B) pairs | similarity score | 1 |
+-----------------------------------------+-----------------------------+-------------------------------+
Relations:
- :class:`MarginMSELoss` is similar to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
student_model = CrossEncoder("microsoft/mpnet-base")
teacher_model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L12-v2")
train_dataset = Dataset.from_dict({
"query": ["What are pandas?", "What is the capital of France?"],
"answer": ["Pandas are a kind of bear.", "The capital of France is Paris."],
})
def compute_labels(batch):
return {
"label": teacher_model.predict(list(zip(batch["query"], batch["answer"])))
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = CrossEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.loss_fct = nn.MSELoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
logits = self.activation_fct(logits)
loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
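# Hedged usage sketch (illustration only; `student_model` as in the
# docstring example): when the teacher emitted sigmoid-bounded scores in
# [0, 1], pass a matching activation so student logits are squashed
# before the MSE is computed:
#
#   loss = MSELoss(student_model, activation_fct=nn.Sigmoid())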
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the MSE loss between the computed query-passage score and a target query-passage score. This loss
is used to distill a cross-encoder model from a teacher cross-encoder model or gold labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.MSELoss`.
.. note::
Be mindful of the magnitude of both the labels and what the model produces. If the teacher model applies a
Sigmoid to bound its logits to [0, 1], then you may wish to use a Sigmoid activation function in this loss.
References:
- Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation: https://arxiv.org/abs/2010.02666
Requirements:
1. Your model must be initialized with `num_labels = 1` (a.k.a. the default) to predict one class.
2. Usually uses a finetuned CrossEncoder teacher M in a knowledge distillation setup.
Inputs:
+-----------------------------------------+-----------------------------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=========================================+=============================+===============================+
| (sentence_A, sentence_B) pairs | similarity score | 1 |
+-----------------------------------------+-----------------------------+-------------------------------+
Relations:
- :class:`MarginMSELoss` is similar to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
student_model = CrossEncoder("microsoft/mpnet-base")
teacher_model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-12-v2")
train_dataset = Dataset.from_dict({
"query": ["What are pandas?", "What is the capital of France?"],
"answer": ["Pandas are a kind of bear.", "The capital of France is Paris."],
})
def compute_labels(batch):
return {
"label": teacher_model.predict(list(zip(batch["query"], batch["answer"])))
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = CrossEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.loss_fct = nn.MSELoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
logits = self.activation_fct(logits)
loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
|
"""Standard LangChain interface tests"""
import os
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
"""Standard LangChain interface tests"""
import os
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
|
"""Base class for Amadeus tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.amadeus.utils import authenticate
if TYPE_CHECKING:
from amadeus import Client
class AmadeusBaseTool(BaseTool):
"""Base Tool for Amadeus."""
client: Client = Field(default_factory=authenticate)
|
"""Base class for Amadeus tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.amadeus.utils import authenticate
if TYPE_CHECKING:
from amadeus import Client
class AmadeusBaseTool(BaseTool): # type: ignore[override]
"""Base Tool for Amadeus."""
client: Client = Field(default_factory=authenticate)
|
import re
from setuptools import find_packages, setup # type: ignore
from pkg_resources import DistributionNotFound, get_distribution
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmengine/version.py'
def choose_requirement(primary, secondary):
"""If some version of primary requirement installed, return primary, else
return secondary."""
try:
name = re.split(r'[!<>=]', primary)[0]
get_distribution(name)
except DistributionNotFound:
return secondary
return str(primary)
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def parse_requirements(fname='requirements/runtime.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
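# Hedged example (illustration only): a requirements line such as
# "numpy>=1.20; platform_system=='Linux'" is parsed into the item
# "numpy>=1.20;platform_system=='Linux'" when with_version=True, and into
# "numpy;platform_system=='Linux'" when with_version=False.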
install_requires = parse_requirements()
try:
# OpenCV installed via conda.
import cv2 # NOQA: F401
major, minor, *rest = cv2.__version__.split('.')
if int(major) < 3:
raise RuntimeError(
f'OpenCV >=3 is required but {cv2.__version__} is installed')
except ImportError:
# If the first package is not installed, fall back to the second one
CHOOSE_INSTALL_REQUIRES = [('opencv-python-headless>=3',
'opencv-python>=3')]
for main, secondary in CHOOSE_INSTALL_REQUIRES:
install_requires.append(choose_requirement(main, secondary))
setup(
name='mmengine',
version=get_version(),
description='Engine of OpenMMLab projects',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/open-mmlab/mmengine',
author='MMEngine Authors',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
python_requires='>=3.6',
install_requires=install_requires,
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
},
)
|
from setuptools import find_packages, setup # type: ignore
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmengine/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def parse_requirements(fname='requirements/runtime.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
setup(
name='mmengine',
version=get_version(),
description='Engine of OpenMMLab projects',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/open-mmlab/mmengine',
author='MMEngine Authors',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
python_requires='>=3.6',
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
},
)
|
import logging
from typing import Any, Callable, List
from llama_index.core.node_parser.interface import TextSplitter
from llama_index.core.utils import globals_helper
logger = logging.getLogger(__name__)
def truncate_text(text: str, text_splitter: TextSplitter) -> str:
"""Truncate text to fit within the chunk size.
Args:
text (str): The text to truncate.
text_splitter (TextSplitter): The splitter to use for chunking.
Returns:
str: The first chunk of the split text that fits within the chunk size.
"""
chunks = text_splitter.split_text(text)
return chunks[0]
def split_text_keep_separator(text: str, separator: str) -> List[str]:
"""Split text with separator and keep the separator at the end of each split.
Args:
text (str): The text to split.
separator (str): The separator to split on.
Returns:
List[str]: List of text segments with separators preserved at the end of each split.
"""
parts = text.split(separator)
result = [separator + s if i > 0 else s for i, s in enumerate(parts)]
return [s for s in result if s]
def split_by_sep(sep: str, keep_sep: bool = True) -> Callable[[str], List[str]]:
"""Create a function that splits text by a separator.
Args:
sep (str): The separator to split on.
keep_sep (bool, optional): Whether to keep the separator in the output. Defaults to True.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of split strings.
"""
if keep_sep:
return lambda text: split_text_keep_separator(text, sep)
else:
return lambda text: text.split(sep)
def split_by_char() -> Callable[[str], List[str]]:
"""Create a function that splits text into individual characters.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of individual characters.
"""
return lambda text: list(text)
def split_by_sentence_tokenizer_internal(text: str, tokenizer: Any) -> List[str]:
"""Get the spans and then return the sentences.
Using the start index of each span
Instead of using end, use the start of the next span if available
"""
spans = list(tokenizer.span_tokenize(text))
sentences = []
for i, span in enumerate(spans):
start = span[0]
if i < len(spans) - 1:
end = spans[i + 1][0]
else:
end = len(text)
sentences.append(text[start:end])
return sentences
def split_by_sentence_tokenizer() -> Callable[[str], List[str]]:
return lambda text: split_by_sentence_tokenizer_internal(
text, globals_helper.punkt_tokenizer
)
def split_by_regex(regex: str) -> Callable[[str], List[str]]:
"""Create a function that splits text using a regular expression pattern.
Args:
regex (str): The regular expression pattern to use for splitting.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of matches based on the regex pattern.
"""
import re
return lambda text: re.findall(regex, text)
def split_by_phrase_regex() -> Callable[[str], List[str]]:
"""Split text by phrase regex.
This regular expression will split the sentences into phrases,
where each phrase is a sequence of one or more non-comma,
non-period, and non-semicolon characters, followed by an optional comma,
period, or semicolon. The regular expression will also capture the
delimiters themselves as separate items in the list of phrases.
"""
regex = "[^,.;。]+[,.;。]?"
return split_by_regex(regex)
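# A small usage sketch (illustrative strings only, not part of the library):
# `split_by_sep` keeps the separator attached to the start of the following
# chunk, while `split_by_phrase_regex` keeps each delimiter attached to the
# phrase it terminates.
if __name__ == "__main__":
    print(split_by_sep("\n")("first line\nsecond line"))
    # ['first line', '\nsecond line']
    print(split_by_phrase_regex()("one, two; three."))
    # ['one,', ' two;', ' three.']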
|
import logging
from typing import Any, Callable, List
from llama_index.core.node_parser.interface import TextSplitter
logger = logging.getLogger(__name__)
def truncate_text(text: str, text_splitter: TextSplitter) -> str:
"""Truncate text to fit within the chunk size.
Args:
text (str): The text to truncate.
text_splitter (TextSplitter): The splitter to use for chunking.
Returns:
str: The first chunk of the split text that fits within the chunk size.
"""
chunks = text_splitter.split_text(text)
return chunks[0]
def split_text_keep_separator(text: str, separator: str) -> List[str]:
"""Split text with separator and keep the separator at the end of each split.
Args:
text (str): The text to split.
separator (str): The separator to split on.
Returns:
List[str]: List of text segments with separators preserved at the end of each split.
"""
parts = text.split(separator)
result = [separator + s if i > 0 else s for i, s in enumerate(parts)]
return [s for s in result if s]
def split_by_sep(sep: str, keep_sep: bool = True) -> Callable[[str], List[str]]:
"""Create a function that splits text by a separator.
Args:
sep (str): The separator to split on.
keep_sep (bool, optional): Whether to keep the separator in the output. Defaults to True.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of split strings.
"""
if keep_sep:
return lambda text: split_text_keep_separator(text, sep)
else:
return lambda text: text.split(sep)
def split_by_char() -> Callable[[str], List[str]]:
"""Create a function that splits text into individual characters.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of individual characters.
"""
return lambda text: list(text)
def split_by_sentence_tokenizer_internal(text: str, tokenizer: Any) -> List[str]:
"""Get the spans and then return the sentences.
Using the start index of each span
Instead of using end, use the start of the next span if available
"""
spans = list(tokenizer.span_tokenize(text))
sentences = []
for i, span in enumerate(spans):
start = span[0]
if i < len(spans) - 1:
end = spans[i + 1][0]
else:
end = len(text)
sentences.append(text[start:end])
return sentences
def split_by_sentence_tokenizer() -> Callable[[str], List[str]]:
import nltk
tokenizer = nltk.tokenize.PunktSentenceTokenizer()
return lambda text: split_by_sentence_tokenizer_internal(text, tokenizer)
def split_by_regex(regex: str) -> Callable[[str], List[str]]:
"""Create a function that splits text using a regular expression pattern.
Args:
regex (str): The regular expression pattern to use for splitting.
Returns:
Callable[[str], List[str]]: A function that takes a string and returns a list of matches based on the regex pattern.
"""
import re
return lambda text: re.findall(regex, text)
def split_by_phrase_regex() -> Callable[[str], List[str]]:
"""Split text by phrase regex.
This regular expression will split the sentences into phrases,
where each phrase is a sequence of one or more non-comma,
non-period, and non-semicolon characters, followed by an optional comma,
period, or semicolon. The regular expression will also capture the
delimiters themselves as separate items in the list of phrases.
"""
regex = "[^,.;。]+[,.;。]?"
return split_by_regex(regex)
|
"""
This script contains an example of how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is to install the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_seismic
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
    # 5. Encode the queries using full precision
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Seismic
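    # On the first iteration the decoded corpus embeddings are passed in and
    # indexed; afterwards the returned corpus_index is reused, so the corpus
    # is not re-indexed (hence the `if corpus_index is None` switch below).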
results, search_time, corpus_index = semantic_search_seismic(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is to install the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_seismic
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
    # 5. Encode the queries using full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Seismic
results, search_time, corpus_index = semantic_search_seismic(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
__version__ = "3.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
__version__ = "3.0.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=64,
batch_augments=[dict(type='BatchFixedSizePad', size=(640, 640))]),
backbone=dict(norm_eval=False),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(640, 640), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 50e
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
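# Example (hypothetical hardware, for illustration only): training on 4 GPUs
# with 8 samples per GPU gives a total batch size of 32, so with auto scaling
# enabled the base lr of 0.08 would be scaled by 32 / 64 = 0.5, giving 0.04.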
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=64,
batch_augments=[dict(type='BatchFixedSizePad', size=(640, 640))]),
backbone=dict(norm_eval=False),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(640, 640), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 50e
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from huggingface_hub.hf_api import RepoFile
from packaging import version
from requests import ConnectionError, HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Server Error)" and "503 (Service Unavailable)" HTTP errors
if config.HF_HUB_VERSION.release < version.parse("0.20.0").release:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
max_retries = 5
base_wait_time = 1
max_wait_time = 8
retry = 0
while True:
try:
hf_api.preupload_lfs_files(**kwargs)
except (RuntimeError, HTTPError, ConnectionError) as err:
if isinstance(err, RuntimeError):
if isinstance(err.__cause__, (HTTPError, ConnectionError)):
err = err.__cause__
else:
raise err
if retry >= max_retries or err.response and err.response.status_code not in [500, 503]:
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff
logger.info(
f"{hf_api.preupload_lfs_files} timed out, retrying in {sleep_time}s... [{retry/max_retries}]"
)
time.sleep(sleep_time)
retry += 1
else:
break
else:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
hf_api.preupload_lfs_files(**kwargs)
# `list_files_info` is deprecated in favor of `list_repo_tree` in `huggingface_hub>=0.20.0`
if config.HF_HUB_VERSION.release < version.parse("0.20.0").release:
def list_files_info(hf_api: HfApi, **kwargs):
yield from hf_api.list_files_info(**kwargs)
else:
def list_files_info(hf_api: HfApi, **kwargs):
kwargs = {**kwargs, "recursive": True}
for repo_path in hf_api.list_repo_tree(**kwargs):
if isinstance(repo_path, RepoFile):
yield repo_path
# backward compatibility
hf_hub_url = partial(hf_hub_url, repo_type="dataset")
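# Backoff sanity check (illustrative, not part of the module): with
# base_wait_time=1 and max_wait_time=8, successive retries sleep
# 1, 2, 4, 8, 8 seconds before giving up after max_retries=5.
if __name__ == "__main__":
    print([min(8, 1 * 2**retry) for retry in range(5)])  # [1, 2, 4, 8, 8]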
|
import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from huggingface_hub.hf_api import RepoFile
from packaging import version
from requests import ConnectionError, HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Server Error)" and "503 (Service Unavailable)" HTTP errors
if config.HF_HUB_VERSION < version.parse("0.20.0"):
def preupload_lfs_files(hf_api: HfApi, **kwargs):
max_retries = 5
base_wait_time = 1
max_wait_time = 8
retry = 0
while True:
try:
hf_api.preupload_lfs_files(**kwargs)
except (RuntimeError, HTTPError, ConnectionError) as err:
if isinstance(err, RuntimeError):
if isinstance(err.__cause__, (HTTPError, ConnectionError)):
err = err.__cause__
else:
raise err
if retry >= max_retries or err.response and err.response.status_code not in [500, 503]:
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff
logger.info(
f"{hf_api.preupload_lfs_files} timed out, retrying in {sleep_time}s... [{retry/max_retries}]"
)
time.sleep(sleep_time)
retry += 1
else:
break
else:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
hf_api.preupload_lfs_files(**kwargs)
# `list_files_info` is deprecated in favor of `list_repo_tree` in `huggingface_hub>=0.20.0`
if config.HF_HUB_VERSION < version.parse("0.20.0"):
def list_files_info(hf_api: HfApi, **kwargs):
yield from hf_api.list_files_info(**kwargs)
else:
def list_files_info(hf_api: HfApi, **kwargs):
kwargs = {**kwargs, "recursive": True}
for repo_path in hf_api.list_repo_tree(**kwargs):
if isinstance(repo_path, RepoFile):
yield repo_path
# backward compatibility
hf_hub_url = partial(hf_hub_url, repo_type="dataset")
|
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
logger = logging.getLogger(__name__)
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}/{channel_key}"
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.info(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _subscribe(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
channel_name = f"{self.event_bus_name}/{channel_key}"
pubsub = connection.pubsub()
return pubsub, channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
self.connection.publish(channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, channel_name = self._subscribe(self.connection, channel_key)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(channel_name)
else:
pubsub.subscribe(channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, channel_name = self._subscribe(await self.connection, channel_key)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(channel_name)
else:
await pubsub.subscribe(channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
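# A minimal concrete subclass sketch (hypothetical event model and bus name,
# for illustration only): subclasses supply `Model` and `event_bus_name`,
# after which publishing and listening only need a channel key.
class ExampleEvent(BaseModel):
    service: str
    timestamp: datetime
class ExampleEventBus(RedisEventBus[ExampleEvent]):
    Model = ExampleEvent
    @property
    def event_bus_name(self) -> str:
        return "example-events"
# Usage (requires a running Redis instance):
#   bus = ExampleEventBus()
#   bus.publish_event(ExampleEvent(service="api", timestamp=datetime.now()), "api")
#   for event in bus.listen_events("*"):  # "*" pattern-subscribes to all keys
#       ...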
|
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
from backend.data.execution import ExecutionResult
from backend.util.settings import Config
logger = logging.getLogger(__name__)
config = Config()
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}-{channel_key}"
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.info(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _subscribe(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
channel_name = f"{self.event_bus_name}-{channel_key}"
pubsub = connection.pubsub()
return pubsub, channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
self.connection.publish(channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, channel_name = self._subscribe(self.connection, channel_key)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(channel_name)
else:
pubsub.subscribe(channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, channel_name = self._subscribe(await self.connection, channel_key)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(channel_name)
else:
await pubsub.subscribe(channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class RedisExecutionEventBus(RedisEventBus[ExecutionResult]):
Model = ExecutionResult
@property
def event_bus_name(self) -> str:
return config.execution_event_bus_name
def publish(self, res: ExecutionResult):
self.publish_event(res, f"{res.graph_id}-{res.graph_exec_id}")
def listen(
self, graph_id: str = "*", graph_exec_id: str = "*"
) -> Generator[ExecutionResult, None, None]:
for execution_result in self.listen_events(f"{graph_id}-{graph_exec_id}"):
yield execution_result
class AsyncRedisExecutionEventBus(AsyncRedisEventBus[ExecutionResult]):
Model = ExecutionResult
@property
def event_bus_name(self) -> str:
return config.execution_event_bus_name
async def publish(self, res: ExecutionResult):
await self.publish_event(res, f"{res.graph_id}-{res.graph_exec_id}")
async def listen(
self, graph_id: str = "*", graph_exec_id: str = "*"
) -> AsyncGenerator[ExecutionResult, None]:
async for execution_result in self.listen_events(f"{graph_id}-{graph_exec_id}"):
yield execution_result
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
type='MaskScoringRCNN',
roi_head=dict(
type='MaskScoringRoIHead',
mask_iou_head=dict(
type='MaskIoUHead',
num_convs=4,
num_fcs=2,
roi_feat_size=14,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
num_classes=80)),
# model training and testing settings
train_cfg=dict(rcnn=dict(mask_thr_binary=0.5)))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
type='MaskScoringRCNN',
roi_head=dict(
type='MaskScoringRoIHead',
mask_iou_head=dict(
type='MaskIoUHead',
num_convs=4,
num_fcs=2,
roi_feat_size=14,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
num_classes=80)),
# model training and testing settings
train_cfg=dict(rcnn=dict(mask_thr_binary=0.5)))
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))
# `lr` and `weight_decay` were found to be optimal via hyperparameter search.
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.1),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))
# `lr` and `weight_decay` were found to be optimal via hyperparameter search.
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
weight_decay=0.1,
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from docarray import BaseDoc
from docarray.typing import ImageUrl
def test_set_image_url():
class MyDocument(BaseDoc):
image_url: ImageUrl
d = MyDocument(image_url="https://jina.ai/img.png")
assert isinstance(d.image_url, ImageUrl)
assert d.image_url == "https://jina.ai/img.png"
|
from docarray import BaseDocument
from docarray.typing import ImageUrl
def test_set_image_url():
class MyDocument(BaseDocument):
image_url: ImageUrl
d = MyDocument(image_url="https://jina.ai/img.png")
assert isinstance(d.image_url, ImageUrl)
assert d.image_url == "https://jina.ai/img.png"
|
# coding: utf-8
import logging
import numpy as np
import lightgbm as lgb
def test_register_logger(tmp_path):
logger = logging.getLogger("LightGBM")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s | %(message)s')
log_filename = tmp_path / "LightGBM_test_logger.log"
file_handler = logging.FileHandler(log_filename, mode="w", encoding="utf-8")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def dummy_metric(_, __):
logger.debug('In dummy_metric')
return 'dummy_metric', 1, True
lgb.register_logger(logger)
X = np.array([[1, 2, 3],
[1, 2, 4],
[1, 2, 4],
[1, 2, 3]],
dtype=np.float32)
y = np.array([0, 1, 1, 0])
lgb_data = lgb.Dataset(X, y)
eval_records = {}
callbacks = [
lgb.record_evaluation(eval_records),
lgb.log_evaluation(2),
lgb.early_stopping(4)
]
lgb.train({'objective': 'binary', 'metric': ['auc', 'binary_error']},
lgb_data, num_boost_round=10, feval=dummy_metric,
valid_sets=[lgb_data], categorical_feature=[1], callbacks=callbacks)
lgb.plot_metric(eval_records)
expected_log = r"""
INFO | [LightGBM] [Warning] There are no meaningful features, as all feature values are constant.
INFO | [LightGBM] [Info] Number of positive: 2, number of negative: 2
INFO | [LightGBM] [Info] Total Bins 0
INFO | [LightGBM] [Info] Number of data points in the train set: 4, number of used features: 0
INFO | [LightGBM] [Info] [binary:BoostFromScore]: pavg=0.500000 -> initscore=0.000000
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | Training until validation scores don't improve for 4 rounds
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [2] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [4] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [6] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [8] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [10] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | Did not meet early stopping. Best iteration is:
[1] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
WARNING | More than one metric available, picking one to plot.
""".strip()
gpu_lines = [
"INFO | [LightGBM] [Info] This is the GPU trainer",
"INFO | [LightGBM] [Info] Using GPU Device:",
"INFO | [LightGBM] [Info] Compiling OpenCL Kernel with 16 bins...",
"INFO | [LightGBM] [Info] GPU programs have been built",
"INFO | [LightGBM] [Warning] GPU acceleration is disabled because no non-trivial dense features can be found",
"INFO | [LightGBM] [Warning] Using sparse features with CUDA is currently not supported.",
"INFO | [LightGBM] [Warning] CUDA currently requires double precision calculations.",
"INFO | [LightGBM] [Info] LightGBM using CUDA trainer with DP float!!"
]
with open(log_filename, "rt", encoding="utf-8") as f:
actual_log = f.read().strip()
actual_log_wo_gpu_stuff = []
for line in actual_log.split("\n"):
if not any(line.startswith(gpu_line) for gpu_line in gpu_lines):
actual_log_wo_gpu_stuff.append(line)
assert "\n".join(actual_log_wo_gpu_stuff) == expected_log
|
# coding: utf-8
import logging
import numpy as np
import lightgbm as lgb
def test_register_logger(tmp_path):
logger = logging.getLogger("LightGBM")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s | %(message)s')
log_filename = tmp_path / "LightGBM_test_logger.log"
file_handler = logging.FileHandler(log_filename, mode="w", encoding="utf-8")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def dummy_metric(_, __):
logger.debug('In dummy_metric')
return 'dummy_metric', 1, True
lgb.register_logger(logger)
X = np.array([[1, 2, 3],
[1, 2, 4],
[1, 2, 4],
[1, 2, 3]],
dtype=np.float32)
y = np.array([0, 1, 1, 0])
lgb_data = lgb.Dataset(X, y)
eval_records = {}
callbacks = [
lgb.record_evaluation(eval_records),
lgb.log_evaluation(2),
lgb.early_stopping(4)
]
lgb.train({'objective': 'binary', 'metric': ['auc', 'binary_error']},
lgb_data, num_boost_round=10, feval=dummy_metric,
valid_sets=[lgb_data], categorical_feature=[1], callbacks=callbacks)
lgb.plot_metric(eval_records)
expected_log = r"""
WARNING | categorical_feature in Dataset is overridden.
New categorical_feature is [1]
INFO | [LightGBM] [Warning] There are no meaningful features, as all feature values are constant.
INFO | [LightGBM] [Info] Number of positive: 2, number of negative: 2
INFO | [LightGBM] [Info] Total Bins 0
INFO | [LightGBM] [Info] Number of data points in the train set: 4, number of used features: 0
INFO | [LightGBM] [Info] [binary:BoostFromScore]: pavg=0.500000 -> initscore=0.000000
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | Training until validation scores don't improve for 4 rounds
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [2] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [4] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [6] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [8] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [10] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | Did not meet early stopping. Best iteration is:
[1] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
WARNING | More than one metric available, picking one to plot.
""".strip()
gpu_lines = [
"INFO | [LightGBM] [Info] This is the GPU trainer",
"INFO | [LightGBM] [Info] Using GPU Device:",
"INFO | [LightGBM] [Info] Compiling OpenCL Kernel with 16 bins...",
"INFO | [LightGBM] [Info] GPU programs have been built",
"INFO | [LightGBM] [Warning] GPU acceleration is disabled because no non-trivial dense features can be found",
"INFO | [LightGBM] [Warning] Using sparse features with CUDA is currently not supported.",
"INFO | [LightGBM] [Warning] CUDA currently requires double precision calculations.",
"INFO | [LightGBM] [Info] LightGBM using CUDA trainer with DP float!!"
]
with open(log_filename, "rt", encoding="utf-8") as f:
actual_log = f.read().strip()
actual_log_wo_gpu_stuff = []
for line in actual_log.split("\n"):
if not any(line.startswith(gpu_line) for gpu_line in gpu_lines):
actual_log_wo_gpu_stuff.append(line)
assert "\n".join(actual_log_wo_gpu_stuff) == expected_log
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
@property
def dummy_uncond_unet(self):
torch.manual_seed(0)
model = UNet2DModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=3,
out_channels=3,
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
return model
def test_inference(self):
unet = self.dummy_uncond_unet
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
pndm.to(torch_device)
pndm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = pndm(generator=generator, num_inference_steps=20, output_type="np").images
generator = torch.manual_seed(0)
image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="np", return_dict=False)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@nightly
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
def test_inference_cifar10(self):
model_id = "google/ddpm-cifar10-32"
unet = UNet2DModel.from_pretrained(model_id)
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
pndm.to(torch_device)
pndm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = pndm(generator=generator, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
@property
def dummy_uncond_unet(self):
torch.manual_seed(0)
model = UNet2DModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=3,
out_channels=3,
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
return model
def test_inference(self):
unet = self.dummy_uncond_unet
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
pndm.to(torch_device)
pndm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = pndm(generator=generator, num_inference_steps=20, output_type="np").images
generator = torch.manual_seed(0)
image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="np", return_dict=False)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@nightly
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
def test_inference_cifar10(self):
model_id = "google/ddpm-cifar10-32"
unet = UNet2DModel.from_pretrained(model_id)
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
pndm.to(torch_device)
pndm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = pndm(generator=generator, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
"""Fake Chat Model wrapper for testing purposes."""
import json
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import SimpleChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
class FakeEchoPromptChatModel(SimpleChatModel):
"""Fake Chat Model wrapper for testing purposes."""
def _call(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return json.dumps([message.model_dump() for message in messages])
async def _agenerate(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = "fake response 2"
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@property
def _llm_type(self) -> str:
return "fake-echo-prompt-chat-model"
@property
def _identifying_params(self) -> dict[str, Any]:
return {"key": "fake"}
|
"""Fake Chat Model wrapper for testing purposes."""
import json
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import SimpleChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
class FakeEchoPromptChatModel(SimpleChatModel):
"""Fake Chat Model wrapper for testing purposes."""
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return json.dumps([message.model_dump() for message in messages])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = "fake response 2"
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@property
def _llm_type(self) -> str:
return "fake-echo-prompt-chat-model"
@property
def _identifying_params(self) -> Dict[str, Any]:
return {"key": "fake"}
|
"""
This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820
TREC 2019 DL is based on the corpus of MS Marco. MS Marco provides a sparse annotation, i.e., usually only a single
passage is marked as relevant for a given query. Many other highly relevant passages are not annotated and are hence
treated as errors if a model ranks them highly.
TREC DL instead annotated up to 200 passages per query for their relevance. It is better suited to estimating
model performance for the task of reranking in Information Retrieval.
Run:
python eval_cross-encoder-trec-dl.py cross-encoder-model-name
"""
import gzip
from collections import defaultdict
import logging
import tqdm
import numpy as np
import sys
import pytrec_eval
from sentence_transformers import util, CrossEncoder
import os
data_folder = "trec2019-data"
os.makedirs(data_folder, exist_ok=True)
# Read test queries
queries = {}
queries_filepath = os.path.join(data_folder, "msmarco-test2019-queries.tsv.gz")
if not os.path.exists(queries_filepath):
logging.info("Download " + os.path.basename(queries_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-test2019-queries.tsv.gz", queries_filepath
)
with gzip.open(queries_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
# Read which passages are relevant
relevant_docs = defaultdict(lambda: defaultdict(int))
qrels_filepath = os.path.join(data_folder, "2019qrels-pass.txt")
if not os.path.exists(qrels_filepath):
logging.info("Download " + os.path.basename(qrels_filepath))
util.http_get("https://trec.nist.gov/data/deep/2019qrels-pass.txt", qrels_filepath)
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, score = line.strip().split()
score = int(score)
if score > 0:
relevant_docs[qid][pid] = score
# Only use queries that have at least one relevant passage
relevant_qid = []
for qid in queries:
if len(relevant_docs[qid]) > 0:
relevant_qid.append(qid)
# Read the top 1000 passages that are supposed to be re-ranked
passage_filepath = os.path.join(data_folder, "msmarco-passagetest2019-top1000.tsv.gz")
if not os.path.exists(passage_filepath):
logging.info("Download " + os.path.basename(passage_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-passagetest2019-top1000.tsv.gz",
passage_filepath,
)
passage_cand = {}
with gzip.open(passage_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, pid, query, passage = line.strip().split("\t")
if qid not in passage_cand:
passage_cand[qid] = []
passage_cand[qid].append([pid, passage])
logging.info("Queries: {}".format(len(queries)))
queries_result_list = []
run = {}
model = CrossEncoder(sys.argv[1], max_length=512)
for qid in tqdm.tqdm(relevant_qid):
query = queries[qid]
cand = passage_cand[qid]
pids = [c[0] for c in cand]
corpus_sentences = [c[1] for c in cand]
cross_inp = [[query, sent] for sent in corpus_sentences]
    if model.config.num_labels > 1:  # Cross-Encoders that predict more than one score: apply softmax and use the positive-class score
cross_scores = model.predict(cross_inp, apply_softmax=True)[:, 1].tolist()
else:
cross_scores = model.predict(cross_inp).tolist()
cross_scores_sparse = {}
for idx, pid in enumerate(pids):
cross_scores_sparse[pid] = cross_scores[idx]
sparse_scores = cross_scores_sparse
run[qid] = {}
for pid in sparse_scores:
run[qid][pid] = float(sparse_scores[pid])
evaluator = pytrec_eval.RelevanceEvaluator(relevant_docs, {"ndcg_cut.10"})
scores = evaluator.evaluate(run)
print("Queries:", len(relevant_qid))
print("NDCG@10: {:.2f}".format(np.mean([ele["ndcg_cut_10"] for ele in scores.values()]) * 100))
|
"""
This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820
TREC 2019 DL is based on the corpus of MS Marco. MS Marco provides a sparse annotation, i.e., usually only a single
passage is marked as relevant for a given query. Many other highly relevant passages are not annotated and are hence
treated as errors if a model ranks them highly.
TREC DL instead annotated up to 200 passages per query for their relevance. It is better suited to estimating
model performance for the task of reranking in Information Retrieval.
Run:
python eval_cross-encoder-trec-dl.py cross-encoder-model-name
"""
import gzip
from collections import defaultdict
import logging
import tqdm
import numpy as np
import sys
import pytrec_eval
from sentence_transformers import util, CrossEncoder
import os
data_folder = "trec2019-data"
os.makedirs(data_folder, exist_ok=True)
# Read test queries
queries = {}
queries_filepath = os.path.join(data_folder, "msmarco-test2019-queries.tsv.gz")
if not os.path.exists(queries_filepath):
logging.info("Download " + os.path.basename(queries_filepath))
util.http_get(
"https://msmarco.blob.core.windows.net/msmarcoranking/msmarco-test2019-queries.tsv.gz", queries_filepath
)
with gzip.open(queries_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
# Read which passages are relevant
relevant_docs = defaultdict(lambda: defaultdict(int))
qrels_filepath = os.path.join(data_folder, "2019qrels-pass.txt")
if not os.path.exists(qrels_filepath):
logging.info("Download " + os.path.basename(qrels_filepath))
util.http_get("https://trec.nist.gov/data/deep/2019qrels-pass.txt", qrels_filepath)
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, score = line.strip().split()
score = int(score)
if score > 0:
relevant_docs[qid][pid] = score
# Only use queries that have at least one relevant passage
relevant_qid = []
for qid in queries:
if len(relevant_docs[qid]) > 0:
relevant_qid.append(qid)
# Read the top 1000 passages that are supposed to be re-ranked
passage_filepath = os.path.join(data_folder, "msmarco-passagetest2019-top1000.tsv.gz")
if not os.path.exists(passage_filepath):
logging.info("Download " + os.path.basename(passage_filepath))
util.http_get(
"https://msmarco.blob.core.windows.net/msmarcoranking/msmarco-passagetest2019-top1000.tsv.gz", passage_filepath
)
passage_cand = {}
with gzip.open(passage_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, pid, query, passage = line.strip().split("\t")
if qid not in passage_cand:
passage_cand[qid] = []
passage_cand[qid].append([pid, passage])
logging.info("Queries: {}".format(len(queries)))
queries_result_list = []
run = {}
model = CrossEncoder(sys.argv[1], max_length=512)
for qid in tqdm.tqdm(relevant_qid):
query = queries[qid]
cand = passage_cand[qid]
pids = [c[0] for c in cand]
corpus_sentences = [c[1] for c in cand]
cross_inp = [[query, sent] for sent in corpus_sentences]
    if model.config.num_labels > 1:  # Cross-Encoders that predict more than one score: apply softmax and use the positive-class score
cross_scores = model.predict(cross_inp, apply_softmax=True)[:, 1].tolist()
else:
cross_scores = model.predict(cross_inp).tolist()
cross_scores_sparse = {}
for idx, pid in enumerate(pids):
cross_scores_sparse[pid] = cross_scores[idx]
sparse_scores = cross_scores_sparse
run[qid] = {}
for pid in sparse_scores:
run[qid][pid] = float(sparse_scores[pid])
evaluator = pytrec_eval.RelevanceEvaluator(relevant_docs, {"ndcg_cut.10"})
scores = evaluator.evaluate(run)
print("Queries:", len(relevant_qid))
print("NDCG@10: {:.2f}".format(np.mean([ele["ndcg_cut_10"] for ele in scores.values()]) * 100))
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
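# Illustrative sketch of the device-placement notes in the module docstring
# above (assumes a CUDA device is visible and uses the public `keras` API;
# kept as comments so importing this backend module stays free of side effects):
#
#   import keras
#   x = keras.ops.zeros((2, 2))          # created on GPU when one is available
#   arr = keras.ops.convert_to_numpy(x)  # moved to CPU before the NumPy conversion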
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '3.0.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
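# Illustrative sketch (not part of the module): `digit_version` converts a
# version string into a comparable tuple, which is what makes the range
# asserts above work, including for release candidates:
#
#   assert digit_version('2.0.0rc4') < digit_version('2.0.0')
#   assert digit_version('2.0.0') < digit_version('3.0.0')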
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
"""Test for Serializable base class"""
import json
import os
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from pydantic import ConfigDict, Field, model_validator
class Person(Serializable):
secret: str
you_can_see_me: str = "hello"
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def lc_secrets(self) -> dict[str, str]:
return {"secret": "SECRET"}
@property
def lc_attributes(self) -> dict[str, str]:
return {"you_can_see_me": self.you_can_see_me}
class SpecialPerson(Person):
another_secret: str
another_visible: str = "bye"
@classmethod
def get_lc_namespace(cls) -> list[str]:
return ["my", "special", "namespace"]
# Gets merged with parent class's secrets
@property
def lc_secrets(self) -> dict[str, str]:
return {"another_secret": "ANOTHER_SECRET"}
# Gets merged with parent class's attributes
@property
def lc_attributes(self) -> dict[str, str]:
return {"another_visible": self.another_visible}
class NotSerializable:
pass
def test_person(snapshot: Any) -> None:
p = Person(secret="hello")
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret="Wooo", secret="Hmm")
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"]
assert SpecialPerson.lc_id() == ["my", "special", "namespace", "SpecialPerson"]
def test_typeerror() -> None:
assert (
dumps({(1, 2): 3})
== """{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], "repr": "{(1, 2): 3}"}""" # noqa: E501
)
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="hello")
assert dumps(person, separators=(",", ":")) == snapshot
def test_person_with_invalid_kwargs() -> None:
person = Person(secret="hello")
with pytest.raises(TypeError):
dumps(person, invalid_kwarg="hello")
class TestClass(Serializable):
my_favorite_secret: str = Field(alias="my_favorite_secret_alias")
my_other_secret: str = Field()
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def get_from_env(cls, values: dict) -> Any:
"""Get the values from the environment."""
if "my_favorite_secret" not in values:
values["my_favorite_secret"] = os.getenv("MY_FAVORITE_SECRET")
if "my_other_secret" not in values:
values["my_other_secret"] = os.getenv("MY_OTHER_SECRET")
return values
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
return ["my", "special", "namespace"]
@property
def lc_secrets(self) -> dict[str, str]:
return {
"my_favorite_secret": "MY_FAVORITE_SECRET",
"my_other_secret": "MY_OTHER_SECRET",
}
def test_aliases_hidden() -> None:
test_class = TestClass(my_favorite_secret="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
# Check while patching the os environment
with patch.dict(
os.environ, {"MY_FAVORITE_SECRET": "hello", "MY_OTHER_SECRET": "world"}
):
test_class = TestClass() # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
# Check by alias
test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
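# Round-trip sketch (assumptions: `loads` from langchain_core.load.load; only
# the env-var ids of secrets are serialized, so real values are re-injected
# via `secrets_map` at load time):
#
#   from langchain_core.load.load import loads
#   serialized = dumps(Person(secret="hello"))
#   restored = loads(serialized, secrets_map={"SECRET": "hello"}, valid_namespaces=["tests"])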
|
"""Test for Serializable base class"""
import json
import os
from typing import Any, Dict, List
from unittest.mock import patch
import pytest
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from pydantic import ConfigDict, Field, model_validator
class Person(Serializable):
secret: str
you_can_see_me: str = "hello"
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def lc_secrets(self) -> Dict[str, str]:
return {"secret": "SECRET"}
@property
def lc_attributes(self) -> Dict[str, str]:
return {"you_can_see_me": self.you_can_see_me}
class SpecialPerson(Person):
another_secret: str
another_visible: str = "bye"
@classmethod
def get_lc_namespace(cls) -> List[str]:
return ["my", "special", "namespace"]
# Gets merged with parent class's secrets
@property
def lc_secrets(self) -> Dict[str, str]:
return {"another_secret": "ANOTHER_SECRET"}
# Gets merged with parent class's attributes
@property
def lc_attributes(self) -> Dict[str, str]:
return {"another_visible": self.another_visible}
class NotSerializable:
pass
def test_person(snapshot: Any) -> None:
p = Person(secret="hello")
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret="Wooo", secret="Hmm")
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"]
assert SpecialPerson.lc_id() == ["my", "special", "namespace", "SpecialPerson"]
def test_typeerror() -> None:
assert (
dumps({(1, 2): 3})
== """{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], "repr": "{(1, 2): 3}"}""" # noqa: E501
)
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="hello")
assert dumps(person, separators=(",", ":")) == snapshot
def test_person_with_invalid_kwargs() -> None:
person = Person(secret="hello")
with pytest.raises(TypeError):
dumps(person, invalid_kwarg="hello")
class TestClass(Serializable):
my_favorite_secret: str = Field(alias="my_favorite_secret_alias")
my_other_secret: str = Field()
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def get_from_env(cls, values: Dict) -> Any:
"""Get the values from the environment."""
if "my_favorite_secret" not in values:
values["my_favorite_secret"] = os.getenv("MY_FAVORITE_SECRET")
if "my_other_secret" not in values:
values["my_other_secret"] = os.getenv("MY_OTHER_SECRET")
return values
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return ["my", "special", "namespace"]
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"my_favorite_secret": "MY_FAVORITE_SECRET",
"my_other_secret": "MY_OTHER_SECRET",
}
def test_aliases_hidden() -> None:
test_class = TestClass(my_favorite_secret="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
# Check while patching the os environment
with patch.dict(
os.environ, {"MY_FAVORITE_SECRET": "hello", "MY_OTHER_SECRET": "world"}
):
test_class = TestClass() # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
# Check by alias
test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
|
"""Score functions, performance metrics, pairwise metrics and distance computations."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from . import cluster
from ._classification import (
accuracy_score,
balanced_accuracy_score,
brier_score_loss,
class_likelihood_ratios,
classification_report,
cohen_kappa_score,
confusion_matrix,
d2_log_loss_score,
f1_score,
fbeta_score,
hamming_loss,
hinge_loss,
jaccard_score,
log_loss,
matthews_corrcoef,
multilabel_confusion_matrix,
precision_recall_fscore_support,
precision_score,
recall_score,
zero_one_loss,
)
from ._dist_metrics import DistanceMetric
from ._plot.confusion_matrix import ConfusionMatrixDisplay
from ._plot.det_curve import DetCurveDisplay
from ._plot.precision_recall_curve import PrecisionRecallDisplay
from ._plot.regression import PredictionErrorDisplay
from ._plot.roc_curve import RocCurveDisplay
from ._ranking import (
auc,
average_precision_score,
coverage_error,
dcg_score,
det_curve,
label_ranking_average_precision_score,
label_ranking_loss,
ndcg_score,
precision_recall_curve,
roc_auc_score,
roc_curve,
top_k_accuracy_score,
)
from ._regression import (
d2_absolute_error_score,
d2_pinball_score,
d2_tweedie_score,
explained_variance_score,
max_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_gamma_deviance,
mean_pinball_loss,
mean_poisson_deviance,
mean_squared_error,
mean_squared_log_error,
mean_tweedie_deviance,
median_absolute_error,
r2_score,
root_mean_squared_error,
root_mean_squared_log_error,
)
from ._scorer import check_scoring, get_scorer, get_scorer_names, make_scorer
from .cluster import (
adjusted_mutual_info_score,
adjusted_rand_score,
calinski_harabasz_score,
completeness_score,
consensus_score,
davies_bouldin_score,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
silhouette_samples,
silhouette_score,
v_measure_score,
)
from .pairwise import (
euclidean_distances,
nan_euclidean_distances,
pairwise_distances,
pairwise_distances_argmin,
pairwise_distances_argmin_min,
pairwise_distances_chunked,
pairwise_kernels,
)
__all__ = [
"ConfusionMatrixDisplay",
"DetCurveDisplay",
"DistanceMetric",
"PrecisionRecallDisplay",
"PredictionErrorDisplay",
"RocCurveDisplay",
"accuracy_score",
"adjusted_mutual_info_score",
"adjusted_rand_score",
"auc",
"average_precision_score",
"balanced_accuracy_score",
"brier_score_loss",
"calinski_harabasz_score",
"check_scoring",
"class_likelihood_ratios",
"classification_report",
"cluster",
"cohen_kappa_score",
"completeness_score",
"confusion_matrix",
"consensus_score",
"coverage_error",
"d2_absolute_error_score",
"d2_log_loss_score",
"d2_pinball_score",
"d2_tweedie_score",
"davies_bouldin_score",
"dcg_score",
"det_curve",
"euclidean_distances",
"explained_variance_score",
"f1_score",
"fbeta_score",
"fowlkes_mallows_score",
"get_scorer",
"get_scorer_names",
"hamming_loss",
"hinge_loss",
"homogeneity_completeness_v_measure",
"homogeneity_score",
"jaccard_score",
"label_ranking_average_precision_score",
"label_ranking_loss",
"log_loss",
"make_scorer",
"matthews_corrcoef",
"max_error",
"mean_absolute_error",
"mean_absolute_percentage_error",
"mean_gamma_deviance",
"mean_pinball_loss",
"mean_poisson_deviance",
"mean_squared_error",
"mean_squared_log_error",
"mean_tweedie_deviance",
"median_absolute_error",
"multilabel_confusion_matrix",
"mutual_info_score",
"nan_euclidean_distances",
"ndcg_score",
"normalized_mutual_info_score",
"pair_confusion_matrix",
"pairwise_distances",
"pairwise_distances_argmin",
"pairwise_distances_argmin_min",
"pairwise_distances_chunked",
"pairwise_kernels",
"precision_recall_curve",
"precision_recall_fscore_support",
"precision_score",
"r2_score",
"rand_score",
"recall_score",
"roc_auc_score",
"roc_curve",
"root_mean_squared_error",
"root_mean_squared_log_error",
"silhouette_samples",
"silhouette_score",
"top_k_accuracy_score",
"v_measure_score",
"zero_one_loss",
]
|
"""Score functions, performance metrics, pairwise metrics and distance computations."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from . import cluster
from ._classification import (
accuracy_score,
balanced_accuracy_score,
brier_score_loss,
class_likelihood_ratios,
classification_report,
cohen_kappa_score,
confusion_matrix,
d2_log_loss_score,
f1_score,
fbeta_score,
hamming_loss,
hinge_loss,
jaccard_score,
log_loss,
matthews_corrcoef,
multilabel_confusion_matrix,
precision_recall_fscore_support,
precision_score,
recall_score,
zero_one_loss,
)
from ._dist_metrics import DistanceMetric
from ._plot.confusion_matrix import ConfusionMatrixDisplay
from ._plot.det_curve import DetCurveDisplay
from ._plot.precision_recall_curve import PrecisionRecallDisplay
from ._plot.regression import PredictionErrorDisplay
from ._plot.roc_curve import RocCurveDisplay
from ._ranking import (
auc,
average_precision_score,
coverage_error,
dcg_score,
det_curve,
label_ranking_average_precision_score,
label_ranking_loss,
ndcg_score,
precision_recall_curve,
roc_auc_score,
roc_curve,
top_k_accuracy_score,
)
from ._regression import (
d2_absolute_error_score,
d2_pinball_score,
d2_tweedie_score,
explained_variance_score,
max_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_gamma_deviance,
mean_pinball_loss,
mean_poisson_deviance,
mean_squared_error,
mean_squared_log_error,
mean_tweedie_deviance,
median_absolute_error,
r2_score,
root_mean_squared_error,
root_mean_squared_log_error,
)
from ._scorer import check_scoring, get_scorer, get_scorer_names, make_scorer
from .cluster import (
adjusted_mutual_info_score,
adjusted_rand_score,
calinski_harabasz_score,
completeness_score,
consensus_score,
davies_bouldin_score,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
silhouette_samples,
silhouette_score,
v_measure_score,
)
from .pairwise import (
euclidean_distances,
nan_euclidean_distances,
pairwise_distances,
pairwise_distances_argmin,
pairwise_distances_argmin_min,
pairwise_distances_chunked,
pairwise_kernels,
)
__all__ = [
"accuracy_score",
"adjusted_mutual_info_score",
"adjusted_rand_score",
"auc",
"average_precision_score",
"balanced_accuracy_score",
"calinski_harabasz_score",
"check_scoring",
"class_likelihood_ratios",
"classification_report",
"cluster",
"cohen_kappa_score",
"completeness_score",
"ConfusionMatrixDisplay",
"confusion_matrix",
"consensus_score",
"coverage_error",
"d2_tweedie_score",
"d2_absolute_error_score",
"d2_log_loss_score",
"d2_pinball_score",
"dcg_score",
"davies_bouldin_score",
"DetCurveDisplay",
"det_curve",
"DistanceMetric",
"euclidean_distances",
"explained_variance_score",
"f1_score",
"fbeta_score",
"fowlkes_mallows_score",
"get_scorer",
"hamming_loss",
"hinge_loss",
"homogeneity_completeness_v_measure",
"homogeneity_score",
"jaccard_score",
"label_ranking_average_precision_score",
"label_ranking_loss",
"log_loss",
"make_scorer",
"nan_euclidean_distances",
"matthews_corrcoef",
"max_error",
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"mean_pinball_loss",
"mean_poisson_deviance",
"mean_gamma_deviance",
"mean_tweedie_deviance",
"median_absolute_error",
"mean_absolute_percentage_error",
"multilabel_confusion_matrix",
"mutual_info_score",
"ndcg_score",
"normalized_mutual_info_score",
"pair_confusion_matrix",
"pairwise_distances",
"pairwise_distances_argmin",
"pairwise_distances_argmin_min",
"pairwise_distances_chunked",
"pairwise_kernels",
"PrecisionRecallDisplay",
"precision_recall_curve",
"precision_recall_fscore_support",
"precision_score",
"PredictionErrorDisplay",
"r2_score",
"rand_score",
"recall_score",
"RocCurveDisplay",
"roc_auc_score",
"roc_curve",
"root_mean_squared_log_error",
"root_mean_squared_error",
"get_scorer_names",
"silhouette_samples",
"silhouette_score",
"top_k_accuracy_score",
"v_measure_score",
"zero_one_loss",
"brier_score_loss",
]
|
from enum import Enum
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, ByteStore
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
similarity_score_threshold = "similarity_score_threshold"
"""Similarity search with a score threshold."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
byte_store: Optional[ByteStore] = None
"""The lower-level backing storage layer for the parent documents"""
docstore: BaseStore[str, Document]
"""The storage interface for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
@model_validator(mode="before")
@classmethod
def shim_docstore(cls, values: dict) -> Any:
byte_store = values.get("byte_store")
docstore = values.get("docstore")
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
msg = "You must pass a `byte_store` parameter."
raise Exception(msg)
values["docstore"] = docstore
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = await self.vectorstore.amax_marginal_relevance_search(
query, **self.search_kwargs
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = await self.vectorstore.asimilarity_search(
query, **self.search_kwargs
)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = await self.docstore.amget(ids)
return [d for d in docs if d is not None]
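# Minimal usage sketch (assumptions: `langchain_chroma` and `langchain_openai`
# are installed and configured; any VectorStore/ByteStore pair works the same
# way, with `byte_store` shimmed into `docstore` by the validator above):
#
#   from langchain.storage import InMemoryByteStore
#   from langchain_chroma import Chroma
#   from langchain_openai import OpenAIEmbeddings
#
#   retriever = MultiVectorRetriever(
#       vectorstore=Chroma(embedding_function=OpenAIEmbeddings()),
#       byte_store=InMemoryByteStore(),
#       id_key="doc_id",
#   )
#   docs = retriever.invoke("some query")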
|
from enum import Enum
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, ByteStore
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
similarity_score_threshold = "similarity_score_threshold"
"""Similarity search with a score threshold."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
byte_store: Optional[ByteStore] = None
"""The lower-level backing storage layer for the parent documents"""
docstore: BaseStore[str, Document]
"""The storage interface for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
@model_validator(mode="before")
@classmethod
def shim_docstore(cls, values: dict) -> Any:
byte_store = values.get("byte_store")
docstore = values.get("docstore")
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
raise Exception("You must pass a `byte_store` parameter.")
values["docstore"] = docstore
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = await self.vectorstore.amax_marginal_relevance_search(
query, **self.search_kwargs
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = await self.vectorstore.asimilarity_search(
query, **self.search_kwargs
)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = await self.docstore.amget(ids)
return [d for d in docs if d is not None]
|
"""Ollama specific chat model integration tests"""
from typing import Annotated, Optional
import pytest
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
from langchain_ollama import ChatOllama
@pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")])
def test_structured_output(method: str) -> None:
"""Test to verify structured output via tool calling and ``format`` parameter."""
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
llm = ChatOllama(model="llama3.1", temperature=0)
query = "Tell me a joke about cats."
# Pydantic
structured_llm = llm.with_structured_output(Joke, method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, Joke)
for chunk in structured_llm.stream(query):
assert isinstance(chunk, Joke)
# JSON Schema
structured_llm = llm.with_structured_output(Joke.model_json_schema(), method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in structured_llm.stream(query):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict) # for mypy
assert set(chunk.keys()) == {"setup", "punchline"}
# Typed Dict
class JokeSchema(TypedDict):
"""Joke to tell user."""
setup: Annotated[str, "question to set up a joke"]
punchline: Annotated[str, "answer to resolve the joke"]
structured_llm = llm.with_structured_output(JokeSchema, method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in structured_llm.stream(query):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict) # for mypy
assert set(chunk.keys()) == {"setup", "punchline"}
@pytest.mark.parametrize(("model"), [("llama3.1")])
def test_structured_output_deeply_nested(model: str) -> None:
    """Test to verify structured output with nested objects."""
llm = ChatOllama(model=model, temperature=0)
class Person(BaseModel):
"""Information about a person."""
name: Optional[str] = Field(default=None, description="The name of the person")
hair_color: Optional[str] = Field(
default=None, description="The color of the person's hair if known"
)
height_in_meters: Optional[str] = Field(
default=None, description="Height measured in meters"
)
class Data(BaseModel):
"""Extracted data about people."""
people: list[Person]
chat = llm.with_structured_output(Data) # type: ignore[arg-type]
    text = (
        "Alan Smith is 6 feet tall and has blond hair. "
        "Alan Poe is 3 feet tall and has grey hair."
)
result = chat.invoke(text)
assert isinstance(result, Data)
for chunk in chat.stream(text):
assert isinstance(chunk, Data)
|
"""Ollama specific chat model integration tests"""
from typing import List, Optional
import pytest
from pydantic import BaseModel, Field
from typing_extensions import Annotated, TypedDict
from langchain_ollama import ChatOllama
@pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")])
def test_structured_output(method: str) -> None:
"""Test to verify structured output via tool calling and ``format`` parameter."""
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
llm = ChatOllama(model="llama3.1", temperature=0)
query = "Tell me a joke about cats."
# Pydantic
structured_llm = llm.with_structured_output(Joke, method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, Joke)
for chunk in structured_llm.stream(query):
assert isinstance(chunk, Joke)
# JSON Schema
structured_llm = llm.with_structured_output(Joke.model_json_schema(), method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in structured_llm.stream(query):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict) # for mypy
assert set(chunk.keys()) == {"setup", "punchline"}
# Typed Dict
class JokeSchema(TypedDict):
"""Joke to tell user."""
setup: Annotated[str, "question to set up a joke"]
punchline: Annotated[str, "answer to resolve the joke"]
structured_llm = llm.with_structured_output(JokeSchema, method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in structured_llm.stream(query):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict) # for mypy
assert set(chunk.keys()) == {"setup", "punchline"}
@pytest.mark.parametrize(("model"), [("llama3.1")])
def test_structured_output_deeply_nested(model: str) -> None:
    """Test to verify structured output with nested objects."""
llm = ChatOllama(model=model, temperature=0)
class Person(BaseModel):
"""Information about a person."""
name: Optional[str] = Field(default=None, description="The name of the person")
hair_color: Optional[str] = Field(
default=None, description="The color of the person's hair if known"
)
height_in_meters: Optional[str] = Field(
default=None, description="Height measured in meters"
)
class Data(BaseModel):
"""Extracted data about people."""
people: List[Person]
chat = llm.with_structured_output(Data) # type: ignore[arg-type]
    text = (
        "Alan Smith is 6 feet tall and has blond hair. "
        "Alan Poe is 3 feet tall and has grey hair."
)
result = chat.invoke(text)
assert isinstance(result, Data)
for chunk in chat.stream(text):
assert isinstance(chunk, Data)
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask2former_r50_lsj_8x2_50e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
import pytest
from xgboost import testing as tm
from xgboost.testing.ordinal import (
run_cat_container,
run_cat_container_iter,
run_cat_container_mixed,
run_cat_invalid,
run_cat_leaf,
run_cat_predict,
run_cat_shap,
run_cat_thread_safety,
run_specified_cat,
)
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_arrow(), tm.no_pandas()))
def test_cat_container() -> None:
run_cat_container("cpu")
def test_cat_container_mixed() -> None:
run_cat_container_mixed("cpu")
def test_cat_container_iter() -> None:
run_cat_container_iter("cpu")
def test_cat_predict() -> None:
run_cat_predict("cpu")
def test_cat_invalid() -> None:
run_cat_invalid("cpu")
def test_cat_thread_safety() -> None:
run_cat_thread_safety("cpu")
def test_cat_shap() -> None:
run_cat_shap("cpu")
def test_cat_leaf() -> None:
run_cat_leaf("cpu")
def test_specified_cat() -> None:
run_specified_cat("cpu")
|
import pytest
from xgboost import testing as tm
from xgboost.testing.ordinal import (
run_cat_container,
run_cat_container_iter,
run_cat_container_mixed,
run_cat_invalid,
run_cat_leaf,
run_cat_predict,
run_cat_shap,
run_cat_thread_safety,
)
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_arrow(), tm.no_pandas()))
def test_cat_container() -> None:
run_cat_container("cpu")
def test_cat_container_mixed() -> None:
run_cat_container_mixed("cpu")
def test_cat_container_iter() -> None:
run_cat_container_iter("cpu")
def test_cat_predict() -> None:
run_cat_predict("cpu")
def test_cat_invalid() -> None:
run_cat_invalid("cpu")
def test_cat_thread_safety() -> None:
run_cat_thread_safety("cpu")
def test_cat_shap() -> None:
run_cat_shap("cpu")
def test_cat_leaf() -> None:
run_cat_leaf("cpu")
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import LTXVideoTransformer3DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
enable_full_determinism()
class LTXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = LTXVideoTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 16
width = 16
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": encoder_attention_mask,
"num_frames": num_frames,
"height": height,
"width": width,
}
@property
def input_shape(self):
return (512, 4)
@property
def output_shape(self):
return (512, 4)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 4,
"out_channels": 4,
"num_attention_heads": 2,
"attention_head_dim": 8,
"cross_attention_dim": 16,
"num_layers": 1,
"qk_norm": "rms_norm_across_heads",
"caption_channels": 16,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"LTXVideoTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class LTXTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
model_class = LTXVideoTransformer3DModel
def prepare_init_args_and_inputs_for_common(self):
return LTXTransformerTests().prepare_init_args_and_inputs_for_common()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import LTXVideoTransformer3DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
enable_full_determinism()
class LTXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = LTXVideoTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 16
width = 16
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": encoder_attention_mask,
"num_frames": num_frames,
"height": height,
"width": width,
}
@property
def input_shape(self):
return (512, 4)
@property
def output_shape(self):
return (512, 4)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 4,
"out_channels": 4,
"num_attention_heads": 2,
"attention_head_dim": 8,
"cross_attention_dim": 16,
"num_layers": 1,
"qk_norm": "rms_norm_across_heads",
"caption_channels": 16,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"LTXVideoTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class LTXTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
model_class = LTXVideoTransformer3DModel
def prepare_init_args_and_inputs_for_common(self):
return LTXTransformerTests().prepare_init_args_and_inputs_for_common()
|
import re
from typing import TYPE_CHECKING, Dict, Iterable, Optional, Tuple
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_community.graph_vectorstores.links import get_links
if TYPE_CHECKING:
import graphviz
def _escape_id(id: str) -> str:
return id.replace(":", "_")
_EDGE_DIRECTION = {
"in": "back",
"out": "forward",
"bidir": "both",
}
_WORD_RE = re.compile(r"\s*\S+")
def _split_prefix(s: str, max_chars: int = 50) -> str:
words = _WORD_RE.finditer(s)
split = min(len(s), max_chars)
for word in words:
if word.end(0) > max_chars:
break
split = word.end(0)
if split == len(s):
return s
else:
return f"{s[0:split]}..."
@beta()
def render_graphviz(
documents: Iterable[Document],
engine: Optional[str] = None,
node_color: Optional[str] = None,
node_colors: Optional[Dict[str, Optional[str]]] = None,
skip_tags: Iterable[Tuple[str, str]] = (),
) -> "graphviz.Digraph":
"""Render a collection of GraphVectorStore documents to GraphViz format.
Args:
documents: The documents to render.
engine: GraphViz layout engine to use. `None` uses the default.
node_color: Default node color.
node_colors: Dictionary specifying colors of specific nodes. Useful for
emphasizing nodes that were selected by MMR, or differ from other
results.
skip_tags: Set of tags to skip when rendering the graph. Specified as
tuples containing the kind and tag.
Returns:
The "graphviz.Digraph" representing the nodes. May be printed to source,
or rendered using `dot`.
Note:
To render the generated DOT source code, you also need to install Graphviz_
(`download page <https://www.graphviz.org/download/>`_,
`archived versions <https://www2.graphviz.org/Archive/stable/>`_,
`installation procedure for Windows <https://forum.graphviz.org/t/new-simplified-installation-procedure-on-windows/224>`_).
"""
if node_colors is None:
node_colors = {}
try:
import graphviz
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import graphviz python package. "
"Please install it with `pip install graphviz`."
)
graph = graphviz.Digraph(engine=engine)
graph.attr(rankdir="LR")
graph.attr("node", style="filled")
skip_tags = set(skip_tags)
tags: dict[Tuple[str, str], str] = {}
for document in documents:
id = document.id
if id is None:
raise ValueError(f"Illegal graph document without ID: {document}")
escaped_id = _escape_id(id)
color = node_colors[id] if id in node_colors else node_color
node_label = "\n".join(
[
graphviz.escape(id),
graphviz.escape(_split_prefix(document.page_content)),
]
)
graph.node(
escaped_id,
label=node_label,
shape="note",
fillcolor=color,
tooltip=graphviz.escape(document.page_content),
)
for link in get_links(document):
tag_key = (link.kind, link.tag)
if tag_key in skip_tags:
continue
tag_id = tags.get(tag_key)
if tag_id is None:
tag_id = f"tag_{len(tags)}"
tags[tag_key] = tag_id
graph.node(tag_id, label=graphviz.escape(f"{link.kind}:{link.tag}"))
graph.edge(escaped_id, tag_id, dir=_EDGE_DIRECTION[link.direction])
return graph
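# Minimal usage sketch (hypothetical documents; rendering additionally needs
# the Graphviz binaries mentioned in the docstring):
#
#   from langchain_core.documents import Document
#   docs = [Document(id="a", page_content="First document"),
#           Document(id="b", page_content="Second document")]
#   graph = render_graphviz(docs, engine="dot")
#   print(graph.source)  # DOT source; graph.render() writes an output file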
|
import re
from typing import TYPE_CHECKING, Dict, Iterable, Optional, Tuple
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_community.graph_vectorstores.links import get_links
if TYPE_CHECKING:
import graphviz
def _escape_id(id: str) -> str:
return id.replace(":", "_")
_EDGE_DIRECTION = {
"in": "back",
"out": "forward",
"bidir": "both",
}
_WORD_RE = re.compile(r"\s*\S+")
def _split_prefix(s: str, max_chars: int = 50) -> str:
words = _WORD_RE.finditer(s)
split = min(len(s), max_chars)
for word in words:
if word.end(0) > max_chars:
break
split = word.end(0)
if split == len(s):
return s
else:
return f"{s[0:split]}..."
@beta()
def render_graphviz(
documents: Iterable[Document],
engine: Optional[str] = None,
node_color: Optional[str] = None,
node_colors: Optional[Dict[str, Optional[str]]] = None,
skip_tags: Iterable[Tuple[str, str]] = (),
) -> "graphviz.Digraph":
"""Render a collection of GraphVectorStore documents to GraphViz format.
Args:
documents: The documents to render.
engine: GraphViz layout engine to use. `None` uses the default.
node_color: Default node color.
node_colors: Dictionary specifying colors of specific nodes. Useful for
emphasizing nodes that were selected by MMR, or differ from other
results.
skip_tags: Set of tags to skip when rendering the graph. Specified as
tuples containing the kind and tag.
Returns:
The "graphviz.Digraph" representing the nodes. May be printed to source,
or rendered using `dot`.
Note:
To render the generated DOT source code, you also need to install Graphviz_
(`download page <https://www.graphviz.org/download/>`_,
`archived versions <https://www2.graphviz.org/Archive/stable/>`_,
`installation procedure for Windows <https://forum.graphviz.org/t/new-simplified-installation-procedure-on-windows/224>`_).
"""
if node_colors is None:
node_colors = {}
try:
import graphviz
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import graphviz python package. "
"Please install it with `pip install graphviz`."
)
graph = graphviz.Digraph(engine=engine)
graph.attr(rankdir="LR")
graph.attr("node", style="filled")
skip_tags = set(skip_tags)
tags: dict[Tuple[str, str], str] = {}
for document in documents:
id = document.id
if id is None:
raise ValueError(f"Illegal graph document without ID: {document}")
escaped_id = _escape_id(id)
color = node_colors[id] if id in node_colors else node_color
node_label = "\n".join(
[
graphviz.escape(id),
graphviz.escape(_split_prefix(document.page_content)),
]
)
graph.node(
escaped_id,
label=node_label,
shape="note",
fillcolor=color,
tooltip=graphviz.escape(document.page_content),
)
for link in get_links(document):
tag_key = (link.kind, link.tag)
if tag_key in skip_tags:
continue
tag_id = tags.get(tag_key)
if tag_id is None:
tag_id = f"tag_{len(tags)}"
tags[tag_key] = tag_id
graph.node(tag_id, label=graphviz.escape(f"{link.kind}:{link.tag}"))
graph.edge(escaped_id, tag_id, dir=_EDGE_DIRECTION[link.direction])
return graph
|
import os
from typing import Union
from uuid import UUID
from pydantic import BaseModel, Field
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from .mixins import ProtoMixin
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: Union[int, str, UUID] = Field(default_factory=lambda: os.urandom(16).hex())
|
import os
from typing import Union
from uuid import UUID
from pydantic import BaseModel, Field
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from .mixins import ProtoMixin
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: Union[int, str, UUID] = Field(default_factory=lambda: os.urandom(16).hex())
class Config:
arbitrary_types_allowed = True
|
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> Dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
"""Load Documents from a set of persistent Steamship Files."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SteamshipFileReader(BaseReader):
"""
Reads persistent Steamship Files and converts them to Documents.
Args:
api_key: Steamship API key. Defaults to STEAMSHIP_API_KEY value if not provided.
Note:
        Requires installation of the `steamship` package and an active Steamship API Key.
To get a Steamship API Key, visit: https://steamship.com/account/api.
Once you have an API Key, expose it via an environment variable named
`STEAMSHIP_API_KEY` or pass it as an init argument (`api_key`).
"""
def __init__(self, api_key: Optional[str] = None) -> None:
"""Initialize the Reader."""
try:
import steamship # noqa
self.api_key = api_key
except ImportError:
raise ImportError(
"`steamship` must be installed to use the SteamshipFileReader.\n"
"Please run `pip install --upgrade steamship."
)
def load_data(
self,
workspace: str,
query: Optional[str] = None,
file_handles: Optional[List[str]] = None,
collapse_blocks: bool = True,
join_str: str = "\n\n",
) -> List[Document]:
"""
Load data from persistent Steamship Files into Documents.
Args:
workspace: the handle for a Steamship workspace
(see: https://docs.steamship.com/workspaces/index.html)
query: a Steamship tag query for retrieving files
(ex: 'filetag and value("import-id")="import-001"')
file_handles: a list of Steamship File handles
(ex: `smooth-valley-9kbdr`)
collapse_blocks: whether to merge individual File Blocks into a
single Document, or separate them.
join_str: when collapse_blocks is True, this is how the block texts
will be concatenated.
Note:
The collection of Files from both `query` and `file_handles` will be
combined. There is no (current) support for deconflicting the collections
(meaning that if a file appears both in the result set of the query and
as a handle in file_handles, it will be loaded twice).
"""
from steamship import File, Steamship
client = Steamship(workspace=workspace, api_key=self.api_key)
files = []
if query:
files_from_query = File.query(client=client, tag_filter_query=query).files
files.extend(files_from_query)
if file_handles:
files.extend([File.get(client=client, handle=h) for h in file_handles])
docs = []
for file in files:
metadata = {"source": file.handle}
for tag in file.tags:
metadata[tag.kind] = tag.value
if collapse_blocks:
text = join_str.join([b.text for b in file.blocks])
docs.append(Document(text=text, id_=file.handle, metadata=metadata))
else:
docs.extend(
[
Document(text=b.text, id_=file.handle, metadata=metadata)
for b in file.blocks
]
)
return docs
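# --- Hedged usage sketch (illustrative only): the workspace handle and the
# tag query below are assumptions, and the API key is expected to come from
# the STEAMSHIP_API_KEY environment variable.
if __name__ == "__main__":
    reader = SteamshipFileReader()
    docs = reader.load_data(
        workspace="my-workspace",
        query='filetag and value("import-id")="import-001"',
    )
    print(f"Loaded {len(docs)} documents")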
|
"""Load Documents from a set of persistent Steamship Files."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SteamshipFileReader(BaseReader):
"""
Reads persistent Steamship Files and converts them to Documents.
Args:
api_key: Steamship API key. Defaults to STEAMSHIP_API_KEY value if not provided.
Note:
Requires install of `steamship` package and an active Steamship API Key.
To get a Steamship API Key, visit: https://steamship.com/account/api.
Once you have an API Key, expose it via an environment variable named
`STEAMSHIP_API_KEY` or pass it as an init argument (`api_key`).
"""
def __init__(self, api_key: Optional[str] = None) -> None:
"""Initialize the Reader."""
try:
import steamship # noqa
self.api_key = api_key
except ImportError:
raise ImportError(
"`steamship` must be installed to use the SteamshipFileReader.\n"
"Please run `pip install --upgrade steamship."
)
def load_data(
self,
workspace: str,
query: Optional[str] = None,
file_handles: Optional[List[str]] = None,
collapse_blocks: bool = True,
join_str: str = "\n\n",
) -> List[Document]:
"""
Load data from persistent Steamship Files into Documents.
Args:
workspace: the handle for a Steamship workspace
(see: https://docs.steamship.com/workspaces/index.html)
query: a Steamship tag query for retrieving files
(ex: 'filetag and value("import-id")="import-001"')
file_handles: a list of Steamship File handles
(ex: `smooth-valley-9kbdr`)
collapse_blocks: whether to merge individual File Blocks into a
single Document, or separate them.
join_str: when collapse_blocks is True, this is how the block texts
will be concatenated.
Note:
The collection of Files from both `query` and `file_handles` will be
combined. There is no (current) support for deconflicting the collections
(meaning that if a file appears both in the result set of the query and
as a handle in file_handles, it will be loaded twice).
"""
from steamship import File, Steamship
client = Steamship(workspace=workspace, api_key=self.api_key)
files = []
if query:
files_from_query = File.query(client=client, tag_filter_query=query).files
files.extend(files_from_query)
if file_handles:
files.extend([File.get(client=client, handle=h) for h in file_handles])
docs = []
for file in files:
metadata = {"source": file.handle}
for tag in file.tags:
metadata[tag.kind] = tag.value
if collapse_blocks:
text = join_str.join([b.text for b in file.blocks])
docs.append(Document(text=text, id_=file.handle, metadata=metadata))
else:
docs.extend(
[
Document(text=b.text, id_=file.handle, metadata=metadata)
for b in file.blocks
]
)
return docs
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.regularizers import deserialize as deserialize
from keras.src.regularizers import get as get
from keras.src.regularizers import serialize as serialize
from keras.src.regularizers.regularizers import L1 as L1
from keras.src.regularizers.regularizers import L1 as l1
from keras.src.regularizers.regularizers import L1L2 as L1L2
from keras.src.regularizers.regularizers import L1L2 as l1_l2
from keras.src.regularizers.regularizers import L2 as L2
from keras.src.regularizers.regularizers import L2 as l2
from keras.src.regularizers.regularizers import (
OrthogonalRegularizer as OrthogonalRegularizer,
)
from keras.src.regularizers.regularizers import (
OrthogonalRegularizer as orthogonal_regularizer,
)
from keras.src.regularizers.regularizers import Regularizer as Regularizer
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.regularizers import deserialize
from keras.src.regularizers import get
from keras.src.regularizers import serialize
from keras.src.regularizers.regularizers import L1
from keras.src.regularizers.regularizers import L1 as l1
from keras.src.regularizers.regularizers import L1L2
from keras.src.regularizers.regularizers import L1L2 as l1_l2
from keras.src.regularizers.regularizers import L2
from keras.src.regularizers.regularizers import L2 as l2
from keras.src.regularizers.regularizers import OrthogonalRegularizer
from keras.src.regularizers.regularizers import (
OrthogonalRegularizer as orthogonal_regularizer,
)
from keras.src.regularizers.regularizers import Regularizer
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .parallel import MMDataParallel, MMDistributedDataParallel
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device', 'MMDataParallel', 'MMDistributedDataParallel',
'register_all_modules', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',
'DistOptimizerHook', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .parallel import MMDataParallel, MMDistributedDataParallel
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device', 'MMDataParallel', 'MMDistributedDataParallel',
'register_all_modules', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',
'DistOptimizerHook', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType'
]
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
import transformers
from PIL import Image
from sentence_transformers.models.Asym import InputModule
class CLIPModel(InputModule):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
        # Re-interleave image and text embeddings back into the original
        # batch order recorded in "image_text_info" (0 = image, 1 = text).
        sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.model.save_pretrained(output_path, safe_serialization=safe_serialization)
self.processor.save_pretrained(output_path)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
local_path = cls.load_dir_path(
model_name_or_path=model_name_or_path,
subfolder=subfolder,
token=token,
cache_folder=cache_folder,
revision=revision,
local_files_only=local_files_only,
)
return cls(local_path)
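# --- Hedged usage sketch (illustrative only): encodes one text and one image
# through the module above. The blank test image is a stand-in, and the
# embedding width (512 for the base patch32 checkpoint) is an assumption.
if __name__ == "__main__":
    clip = CLIPModel()
    batch = ["a photo of a dog", Image.new("RGB", (224, 224))]
    features = clip.tokenize(batch)
    out = clip(features)
    print(out["sentence_embedding"].shape)  # expected: torch.Size([2, 512])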
|
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
        # Re-interleave image and text embeddings back into the original
        # batch order recorded in "image_text_info" (0 = image, 1 = text).
        sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> CLIPModel:
return CLIPModel(model_name=input_path)
|
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
return SystemMessage(
content=(
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, only if necessary"
)
)
def create_conversational_retrieval_agent(
llm: BaseLanguageModel,
tools: list[BaseTool],
remember_intermediate_steps: bool = True, # noqa: FBT001,FBT002
memory_key: str = "chat_history",
system_message: Optional[SystemMessage] = None,
verbose: bool = False, # noqa: FBT001,FBT002
max_token_limit: int = 2000,
**kwargs: Any,
) -> AgentExecutor:
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
        verbose: Whether the final AgentExecutor should be verbose,
defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
)
else:
memory = ConversationTokenBufferMemory(
memory_key=memory_key,
return_messages=True,
output_key="output",
llm=llm,
max_token_limit=max_token_limit,
)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
return_intermediate_steps=remember_intermediate_steps,
**kwargs,
)
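# --- Hedged usage sketch (illustrative only): assumes the `langchain-openai`
# package and an OPENAI_API_KEY are available; the tool below is a made-up
# example, not part of this module.
if __name__ == "__main__":
    from langchain_core.tools import tool
    from langchain_openai import ChatOpenAI

    @tool
    def word_count(text: str) -> int:
        """Count the words in a piece of text."""
        return len(text.split())

    executor = create_conversational_retrieval_agent(
        llm=ChatOpenAI(temperature=0),
        tools=[word_count],
        verbose=True,
    )
    print(executor.invoke({"input": "How many words are in 'hello brave new world'?"}))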
|
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
return SystemMessage(
content=(
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, only if necessary"
)
)
def create_conversational_retrieval_agent(
llm: BaseLanguageModel,
tools: list[BaseTool],
remember_intermediate_steps: bool = True,
memory_key: str = "chat_history",
system_message: Optional[SystemMessage] = None,
verbose: bool = False,
max_token_limit: int = 2000,
**kwargs: Any,
) -> AgentExecutor:
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
        verbose: Whether the final AgentExecutor should be verbose,
defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
)
else:
memory = ConversationTokenBufferMemory(
memory_key=memory_key,
return_messages=True,
output_key="output",
llm=llm,
max_token_limit=max_token_limit,
)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
return_intermediate_steps=remember_intermediate_steps,
**kwargs,
)
|
from typing import Annotated, Optional
import typer
from langchain_cli._version import __version__
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.namespaces.migrate import main as migrate_namespace
from langchain_cli.utils.packages import get_langserve_export, get_package_root
app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
template_namespace.package_cli, name="template", help=template_namespace.__doc__
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(
integration_namespace.integration_cli,
name="integration",
help=integration_namespace.__doc__,
)
app.command(
name="migrate",
context_settings={
# Let Grit handle the arguments
"allow_extra_args": True,
"ignore_unknown_options": True,
},
)(
migrate_namespace.migrate,
)
def version_callback(show_version: bool) -> None:
if show_version:
typer.echo(f"langchain-cli {__version__}")
raise typer.Exit()
@app.callback()
def main(
version: bool = typer.Option(
False,
"--version",
"-v",
help="Print the current CLI version.",
callback=version_callback,
is_eager=True,
),
):
pass
@app.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
) -> None:
"""
Start the LangServe app, whether it's a template or an app.
"""
    # see if it is a template
try:
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
get_langserve_export(pyproject)
except KeyError:
# not a template
app_namespace.serve(port=port, host=host)
else:
# is a template
template_namespace.serve(port=port, host=host)
if __name__ == "__main__":
app()
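# --- Hedged usage notes (illustrative only), assuming the app above is
# installed as the `langchain` console script:
#
#   langchain --version          # print the CLI version and exit
#   langchain serve --port 8000  # serve the template or app in the current dir
#   langchain migrate --help     # Grit-handled migration arguments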
|
from typing import Optional
import typer
from typing_extensions import Annotated
from langchain_cli._version import __version__
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.namespaces.migrate import main as migrate_namespace
from langchain_cli.utils.packages import get_langserve_export, get_package_root
app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
template_namespace.package_cli, name="template", help=template_namespace.__doc__
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(
integration_namespace.integration_cli,
name="integration",
help=integration_namespace.__doc__,
)
app.command(
name="migrate",
context_settings={
# Let Grit handle the arguments
"allow_extra_args": True,
"ignore_unknown_options": True,
},
)(
migrate_namespace.migrate,
)
def version_callback(show_version: bool) -> None:
if show_version:
typer.echo(f"langchain-cli {__version__}")
raise typer.Exit()
@app.callback()
def main(
version: bool = typer.Option(
False,
"--version",
"-v",
help="Print the current CLI version.",
callback=version_callback,
is_eager=True,
),
):
pass
@app.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
) -> None:
"""
Start the LangServe app, whether it's a template or an app.
"""
    # see if it is a template
try:
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
get_langserve_export(pyproject)
except KeyError:
# not a template
app_namespace.serve(port=port, host=host)
else:
# is a template
template_namespace.serve(port=port, host=host)
if __name__ == "__main__":
app()
|
"""
=========================
Tensor transforms and JIT
=========================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_scripted_tensor_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_scripted_tensor_transforms.py>` to download the full example code.
This example illustrates various features that are now supported by the
:ref:`image transformations <transforms>` on Tensor images. In particular, we
show how image transforms can be performed on GPU, and how one can also script
them using JIT compilation.
Prior to v0.8.0, transforms in torchvision were PIL-centric and presented
multiple limitations due to that. Since v0.8.0, transform implementations are
compatible with both Tensor and PIL images, and we can achieve the following
new features:
- transform multi-band torch tensor images (with more than 3-4 channels)
- torchscript transforms together with your model for deployment
- support for GPU acceleration
- batched transformation such as for videos
- read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats)
.. note::
These features are only possible with **Tensor** images.
"""
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as T
from torchvision.io import read_image
plt.rcParams["savefig.bbox"] = 'tight'
torch.manual_seed(1)
def show(imgs):
    fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)
for i, img in enumerate(imgs):
img = T.ToPILImage()(img.to('cpu'))
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
# %%
# The :func:`~torchvision.io.read_image` function allows us to read an image and
# directly load it as a tensor
dog1 = read_image(str(Path('../assets') / 'dog1.jpg'))
dog2 = read_image(str(Path('../assets') / 'dog2.jpg'))
show([dog1, dog2])
# %%
# Transforming images on GPU
# --------------------------
# Most transforms natively support tensors on top of PIL images (to visualize
# the effect of the transforms, you may refer to
# :ref:`sphx_glr_auto_examples_transforms_plot_transforms_illustrations.py`).
# Using tensor images, we can run the transforms on GPUs if CUDA is available!
import torch.nn as nn
transforms = torch.nn.Sequential(
T.RandomCrop(224),
T.RandomHorizontalFlip(p=0.3),
)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dog1 = dog1.to(device)
dog2 = dog2.to(device)
transformed_dog1 = transforms(dog1)
transformed_dog2 = transforms(dog2)
show([transformed_dog1, transformed_dog2])
# %%
# Scriptable transforms for easier deployment via torchscript
# -----------------------------------------------------------
# We now show how to combine image transformations and a model forward pass,
# while using ``torch.jit.script`` to obtain a single scripted module.
#
# Let's define a ``Predictor`` module that transforms the input tensor and then
# applies an ImageNet model on it.
from torchvision.models import resnet18, ResNet18_Weights
class Predictor(nn.Module):
def __init__(self):
super().__init__()
weights = ResNet18_Weights.DEFAULT
self.resnet18 = resnet18(weights=weights, progress=False).eval()
self.transforms = weights.transforms()
def forward(self, x: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
x = self.transforms(x)
y_pred = self.resnet18(x)
return y_pred.argmax(dim=1)
# %%
# Now, let's define scripted and non-scripted instances of ``Predictor`` and
# apply them to multiple tensor images of the same size
predictor = Predictor().to(device)
scripted_predictor = torch.jit.script(predictor).to(device)
batch = torch.stack([dog1, dog2]).to(device)
res = predictor(batch)
res_scripted = scripted_predictor(batch)
# %%
# We can verify that the prediction of the scripted and non-scripted models are
# the same:
import json
with open(Path('../assets') / 'imagenet_class_index.json') as labels_file:
labels = json.load(labels_file)
for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)):
assert pred == pred_scripted
print(f"Prediction for Dog {i + 1}: {labels[str(pred.item())]}")
# %%
# Since the model is scripted, it can be easily dumped to disk and re-used
import tempfile
with tempfile.NamedTemporaryFile() as f:
scripted_predictor.save(f.name)
dumped_scripted_predictor = torch.jit.load(f.name)
res_scripted_dumped = dumped_scripted_predictor(batch)
assert (res_scripted_dumped == res_scripted).all()
|
"""
=========================
Tensor transforms and JIT
=========================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_scripted_tensor_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_scripted_tensor_transforms.py>` to download the full example code.
This example illustrates various features that are now supported by the
:ref:`image transformations <transforms>` on Tensor images. In particular, we
show how image transforms can be performed on GPU, and how one can also script
them using JIT compilation.
Prior to v0.8.0, transforms in torchvision were PIL-centric and presented
multiple limitations due to that. Since v0.8.0, transform implementations are
compatible with both Tensor and PIL images, and we can achieve the following
new features:
- transform multi-band torch tensor images (with more than 3-4 channels)
- torchscript transforms together with your model for deployment
- support for GPU acceleration
- batched transformation such as for videos
- read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats)
.. note::
These features are only possible with **Tensor** images.
"""
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as T
from torchvision.io import read_image
plt.rcParams["savefig.bbox"] = 'tight'
torch.manual_seed(1)
def show(imgs):
    fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)
for i, img in enumerate(imgs):
img = T.ToPILImage()(img.to('cpu'))
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
# %%
# The :func:`~torchvision.io.read_image` function allows us to read an image and
# directly load it as a tensor
dog1 = read_image(str(Path('../assets') / 'dog1.jpg'))
dog2 = read_image(str(Path('../assets') / 'dog2.jpg'))
show([dog1, dog2])
# %%
# Transforming images on GPU
# --------------------------
# Most transforms natively support tensors on top of PIL images (to visualize
# the effect of the transforms, you may refer to
# :ref:`sphx_glr_auto_examples_others_plot_transforms.py`).
# Using tensor images, we can run the transforms on GPUs if CUDA is available!
import torch.nn as nn
transforms = torch.nn.Sequential(
T.RandomCrop(224),
T.RandomHorizontalFlip(p=0.3),
)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dog1 = dog1.to(device)
dog2 = dog2.to(device)
transformed_dog1 = transforms(dog1)
transformed_dog2 = transforms(dog2)
show([transformed_dog1, transformed_dog2])
# %%
# Scriptable transforms for easier deployment via torchscript
# -----------------------------------------------------------
# We now show how to combine image transformations and a model forward pass,
# while using ``torch.jit.script`` to obtain a single scripted module.
#
# Let's define a ``Predictor`` module that transforms the input tensor and then
# applies an ImageNet model on it.
from torchvision.models import resnet18, ResNet18_Weights
class Predictor(nn.Module):
def __init__(self):
super().__init__()
weights = ResNet18_Weights.DEFAULT
self.resnet18 = resnet18(weights=weights, progress=False).eval()
self.transforms = weights.transforms()
def forward(self, x: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
x = self.transforms(x)
y_pred = self.resnet18(x)
return y_pred.argmax(dim=1)
# %%
# Now, let's define scripted and non-scripted instances of ``Predictor`` and
# apply them to multiple tensor images of the same size
predictor = Predictor().to(device)
scripted_predictor = torch.jit.script(predictor).to(device)
batch = torch.stack([dog1, dog2]).to(device)
res = predictor(batch)
res_scripted = scripted_predictor(batch)
# %%
# We can verify that the prediction of the scripted and non-scripted models are
# the same:
import json
with open(Path('../assets') / 'imagenet_class_index.json') as labels_file:
labels = json.load(labels_file)
for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)):
assert pred == pred_scripted
print(f"Prediction for Dog {i + 1}: {labels[str(pred.item())]}")
# %%
# Since the model is scripted, it can be easily dumped to disk and re-used
import tempfile
with tempfile.NamedTemporaryFile() as f:
scripted_predictor.save(f.name)
dumped_scripted_predictor = torch.jit.load(f.name)
res_scripted_dumped = dumped_scripted_predictor(batch)
assert (res_scripted_dumped == res_scripted).all()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .vis_backend import (BaseVisBackend, LocalVisBackend,
TensorboardVisBackend, WandbVisBackend)
from .visualizer import Visualizer
__all__ = [
'Visualizer', 'BaseVisBackend', 'LocalVisBackend', 'WandbVisBackend',
'TensorboardVisBackend'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .visualizer import Visualizer
from .writer import (BaseWriter, ComposedWriter, LocalWriter,
TensorboardWriter, WandbWriter)
__all__ = [
'Visualizer', 'BaseWriter', 'LocalWriter', 'WandbWriter',
'TensorboardWriter', 'ComposedWriter'
]
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, snr, lengths)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, snr, lengths)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
        # use 1.9 instead of 2 so as to include values above the Nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self.assertTrue(gradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
self.assertTrue(gradgradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.preemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.preemphasis, (waveform, coeff)))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.deemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.deemphasis, (waveform, coeff)))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
def test_filter_waveform(self):
waveform = torch.rand(3, 1, 2, 10, device=self.device, dtype=self.dtype, requires_grad=True)
filters = torch.rand(3, 2, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.filter_waveform, (waveform, filters))
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
        # use 1.9 instead of 2 so as to include values above the Nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self.assertTrue(gradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
self.assertTrue(gradgradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.preemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.preemphasis, (waveform, coeff)))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.deemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.deemphasis, (waveform, coeff)))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
def test_filter_waveform(self):
waveform = torch.rand(3, 1, 2, 10, device=self.device, dtype=self.dtype, requires_grad=True)
filters = torch.rand(3, 2, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.filter_waveform, (waveform, filters))
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
        num_last_epochs (int): The number of final training epochs during
            which the data augmentation is turned off and the head switches
            to L1 loss. Default: 15.
        skip_type_keys (list[str], optional): Sequence of pipeline type
            strings to be skipped. Default: ('Mosaic', 'RandomAffine', 'MixUp')
"""
def __init__(self,
num_last_epochs=15,
skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
self._restart_dataloader = False
def before_train_epoch(self, runner):
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.data_loader
model = runner.model
if is_module_wrapper(model):
model = model.module
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
runner.logger.info('No mosaic and mixup aug now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
runner.logger.info('Add additional L1 loss now!')
model.bbox_head.use_l1 = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
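# --- Hedged config sketch (illustrative only): how a hook like the one above
# is typically enabled in an mmdet config; the priority value is an assumption
# carried over from the reference YOLOX configs.
custom_hooks = [
    dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
]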
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
        num_last_epochs (int): The number of final training epochs during
            which the data augmentation is turned off and the head switches
            to L1 loss. Default: 15.
        skip_type_keys (list[str], optional): Sequence of pipeline type
            strings to be skipped. Default: ('Mosaic', 'RandomAffine', 'MixUp')
"""
def __init__(self,
num_last_epochs=15,
skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
self._restart_dataloader = False
def before_train_epoch(self, runner):
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.data_loader
model = runner.model
if is_module_wrapper(model):
model = model.module
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
runner.logger.info('No mosaic and mixup aug now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
runner.logger.info('Add additional L1 loss now!')
model.bbox_head.use_l1 = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
|
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Identity")
class Identity(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.built = True
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs):
return tree.map_structure(
lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse),
inputs,
)
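# --- Hedged usage sketch (illustrative only): Identity forwards its input
# unchanged, so shape and values are preserved.
if __name__ == "__main__":
    import numpy as np
    layer = Identity()
    x = np.ones((2, 3), dtype="float32")
    print(layer(x).shape)  # (2, 3): passes through unchanged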
|
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Identity")
class Identity(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs):
return tree.map_structure(
lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse),
inputs,
)
|
# flake8: noqa
import torchaudio
from . import utils
from .utils import _is_backend_dispatcher_enabled, get_audio_backend, list_audio_backends, set_audio_backend
if _is_backend_dispatcher_enabled():
from torchaudio._backend.utils import get_info_func, get_load_func, get_save_func
torchaudio.info = get_info_func()
torchaudio.load = get_load_func()
torchaudio.save = get_save_func()
else:
utils._init_audio_backend()
|
# flake8: noqa
import torchaudio
from torchaudio._backend.utils import get_info_func, get_load_func, get_save_func
from . import utils
from .utils import _is_backend_dispatcher_enabled, get_audio_backend, list_audio_backends, set_audio_backend
if _is_backend_dispatcher_enabled():
torchaudio.info = get_info_func()
torchaudio.load = get_load_func()
torchaudio.save = get_save_func()
else:
utils._init_audio_backend()
|
"""Util that Searches calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import Any, Dict, List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT, clean_body
class SearchEventsInput(BaseModel):
"""Input for SearchEmails Tool.
From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
start_datetime: str = Field(
description=(
" The start datetime for the search query in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
end_datetime: str = Field(
description=(
" The end datetime for the search query in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the event's body is truncated to meet token number limits. Set to "
"False for searches that will retrieve small events, otherwise, set to "
"True."
),
)
class O365SearchEvents(O365BaseTool):
"""Search calendar events in Office 365.
Free, but setup is required
"""
name: str = "events_search"
args_schema: Type[BaseModel] = SearchEventsInput
description: str = (
" Use this tool to search for the user's calendar events."
" The input must be the start and end datetimes for the search query."
" The output is a JSON list of all the events in the user's calendar"
" between the start and end times. You can assume that the user can "
" not schedule any meeting over existing meetings, and that the user "
"is busy during meetings. Any times without events are free for the user. "
)
model_config = ConfigDict(
extra="forbid",
)
def _run(
self,
start_datetime: str,
end_datetime: str,
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
truncate_limit: int = 150,
) -> List[Dict[str, Any]]:
# Get calendar object
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
# Process the date range parameters
start_datetime_query = dt.strptime(start_datetime, UTC_FORMAT)
end_datetime_query = dt.strptime(end_datetime, UTC_FORMAT)
# Run the query
q = calendar.new_query("start").greater_equal(start_datetime_query)
q.chain("and").on_attribute("end").less_equal(end_datetime_query)
events = calendar.get_events(query=q, include_recurring=True, limit=max_results)
# Generate output dict
output_events = []
for event in events:
output_event = {}
output_event["organizer"] = event.organizer
output_event["subject"] = event.subject
if truncate:
output_event["body"] = clean_body(event.body)[:truncate_limit]
else:
output_event["body"] = clean_body(event.body)
# Get the time zone from the search parameters
time_zone = start_datetime_query.tzinfo
# Assign the datetimes in the search time zone
output_event["start_datetime"] = event.start.astimezone(time_zone).strftime(
UTC_FORMAT
)
output_event["end_datetime"] = event.end.astimezone(time_zone).strftime(
UTC_FORMAT
)
output_event["modified_date"] = event.modified.astimezone(
time_zone
).strftime(UTC_FORMAT)
output_events.append(output_event)
return output_events
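# --- Hedged usage sketch (illustrative only): assumes Office 365 credentials
# are already configured as described in the Microsoft Graph docs linked above.
if __name__ == "__main__":
    search = O365SearchEvents()
    events = search.run(
        {
            "start_datetime": "2023-06-09T00:00:00+00:00",
            "end_datetime": "2023-06-10T00:00:00+00:00",
        }
    )
    print(events)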
|
"""Util that Searches calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import Any, Dict, List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT, clean_body
class SearchEventsInput(BaseModel):
"""Input for SearchEmails Tool.
From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
start_datetime: str = Field(
description=(
" The start datetime for the search query in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
end_datetime: str = Field(
description=(
" The end datetime for the search query in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the event's body is truncated to meet token number limits. Set to "
"False for searches that will retrieve small events, otherwise, set to "
"True."
),
)
class O365SearchEvents(O365BaseTool):  # type: ignore[override]
"""Search calendar events in Office 365.
Free, but setup is required
"""
name: str = "events_search"
args_schema: Type[BaseModel] = SearchEventsInput
description: str = (
" Use this tool to search for the user's calendar events."
" The input must be the start and end datetimes for the search query."
" The output is a JSON list of all the events in the user's calendar"
" between the start and end times. You can assume that the user can "
" not schedule any meeting over existing meetings, and that the user "
"is busy during meetings. Any times without events are free for the user. "
)
model_config = ConfigDict(
extra="forbid",
)
def _run(
self,
start_datetime: str,
end_datetime: str,
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
truncate_limit: int = 150,
) -> List[Dict[str, Any]]:
# Get calendar object
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
# Process the date range parameters
start_datetime_query = dt.strptime(start_datetime, UTC_FORMAT)
end_datetime_query = dt.strptime(end_datetime, UTC_FORMAT)
# Run the query
q = calendar.new_query("start").greater_equal(start_datetime_query)
q.chain("and").on_attribute("end").less_equal(end_datetime_query)
events = calendar.get_events(query=q, include_recurring=True, limit=max_results)
# Generate output dict
output_events = []
for event in events:
output_event = {}
output_event["organizer"] = event.organizer
output_event["subject"] = event.subject
if truncate:
output_event["body"] = clean_body(event.body)[:truncate_limit]
else:
output_event["body"] = clean_body(event.body)
# Get the time zone from the search parameters
time_zone = start_datetime_query.tzinfo
# Assign the datetimes in the search time zone
output_event["start_datetime"] = event.start.astimezone(time_zone).strftime(
UTC_FORMAT
)
output_event["end_datetime"] = event.end.astimezone(time_zone).strftime(
UTC_FORMAT
)
output_event["modified_date"] = event.modified.astimezone(
time_zone
).strftime(UTC_FORMAT)
output_events.append(output_event)
return output_events
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the :func:`~sklearn.datasets.make_multilabel_classification`
dataset generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
  1     2     3   Color
===== ===== ===== ======
  Y     N     N   Red
  N     Y     N   Blue
  N     N     Y   Yellow
  Y     Y     N   Purple
  Y     N     Y   Orange
  N     Y     Y   Green
  Y     Y     Y   Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_multilabel_classification as make_ml_clf
COLORS = np.array(
[
"!",
"#FF3333", # red
"#0198E1", # blue
"#BF5FFF", # purple
"#FCD116", # yellow
"#FF7216", # orange
"#4DBD33", # green
"#87421F", # brown
]
)
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2**10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(
n_samples=150,
n_features=2,
n_classes=n_classes,
n_labels=n_labels,
length=length,
allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED,
)
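    # (Added note) Y is an (n_samples, n_classes) binary indicator matrix;
    # weighting its columns by [1, 2, 4] and summing gives each label
    # combination a unique index 1..7 into COLORS (index 0, "!", is
    # unreachable because allow_unlabeled=False).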
ax.scatter(
X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]).sum(axis=1)), marker="."
)
ax.scatter(
p_w_c[0] * length,
p_w_c[1] * length,
marker="*",
linewidth=0.5,
edgecolor="black",
s=20 + 1500 * p_c**2,
color=COLORS.take([1, 2, 4]),
)
ax.set_xlabel("Feature 0 count")
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex="row", sharey="row", figsize=(8, 4))
plt.subplots_adjust(bottom=0.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title("n_labels=1, length=50")
ax1.set_ylabel("Feature 1 count")
plot_2d(ax2, n_labels=3)
ax2.set_title("n_labels=3, length=50")
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print("The data was generated from (random_state=%d):" % RANDOM_SEED)
print("Class", "P(C)", "P(w0|C)", "P(w1|C)", sep="\t")
for k, p, p_w in zip(["red", "blue", "yellow"], p_c, p_w_c.T):
print("%s\t%0.2f\t%0.2f\t%0.2f" % (k, p, p_w[0], p_w[1]))
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the :func:`~sklearn.datasets.make_multilabel_classification`
dataset generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
Y Y N Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_multilabel_classification as make_ml_clf
COLORS = np.array(
[
"!",
"#FF3333", # red
"#0198E1", # blue
"#BF5FFF", # purple
"#FCD116", # yellow
"#FF7216", # orange
"#4DBD33", # green
"#87421F", # brown
]
)
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2**10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(
n_samples=150,
n_features=2,
n_classes=n_classes,
n_labels=n_labels,
length=length,
allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED,
)
ax.scatter(
X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]).sum(axis=1)), marker="."
)
ax.scatter(
p_w_c[0] * length,
p_w_c[1] * length,
marker="*",
linewidth=0.5,
edgecolor="black",
s=20 + 1500 * p_c**2,
color=COLORS.take([1, 2, 4]),
)
ax.set_xlabel("Feature 0 count")
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex="row", sharey="row", figsize=(8, 4))
plt.subplots_adjust(bottom=0.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title("n_labels=1, length=50")
ax1.set_ylabel("Feature 1 count")
plot_2d(ax2, n_labels=3)
ax2.set_title("n_labels=3, length=50")
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print("The data was generated from (random_state=%d):" % RANDOM_SEED)
print("Class", "P(C)", "P(w0|C)", "P(w1|C)", sep="\t")
for k, p, p_w in zip(["red", "blue", "yellow"], p_c, p_w_c.T):
print("%s\t%0.2f\t%0.2f\t%0.2f" % (k, p, p_w[0], p_w[1]))
|
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from transformers import HfArgumentParser
from transformers.commands.add_fast_image_processor import AddFastImageProcessorCommand
from transformers.commands.add_new_model_like import AddNewModelLikeCommand
from transformers.commands.chat import ChatCommand
from transformers.commands.convert import ConvertCommand
from transformers.commands.download import DownloadCommand
from transformers.commands.env import EnvironmentCommand
from transformers.commands.run import RunCommand
from transformers.commands.serving import ServeCommand
def main_cli():
warnings.warn(
"`transformers-cli` is deprecated in favour of `transformers` directly and will be removed in v5.",
DeprecationWarning,
)
main()
def main():
parser = HfArgumentParser(prog="Transformers CLI tool", usage="transformers <command> [<args>]")
commands_parser = parser.add_subparsers(help="transformers command helpers")
# Register commands
ChatCommand.register_subcommand(commands_parser)
ConvertCommand.register_subcommand(commands_parser)
DownloadCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
RunCommand.register_subcommand(commands_parser)
ServeCommand.register_subcommand(commands_parser)
AddNewModelLikeCommand.register_subcommand(commands_parser)
AddFastImageProcessorCommand.register_subcommand(commands_parser)
# Let's go
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
# Run
service = args.func(args)
service.run()
if __name__ == "__main__":
main()
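# --- Illustrative invocations (added note, not part of the original file) ---
#   transformers env          # print environment info for bug reports
#   transformers chat ...     # interactive chat session
# Running with no subcommand prints the help text and exits with status 1.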
|
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import HfArgumentParser
from transformers.commands.add_fast_image_processor import AddFastImageProcessorCommand
from transformers.commands.add_new_model_like import AddNewModelLikeCommand
from transformers.commands.chat import ChatCommand
from transformers.commands.convert import ConvertCommand
from transformers.commands.download import DownloadCommand
from transformers.commands.env import EnvironmentCommand
from transformers.commands.run import RunCommand
from transformers.commands.serving import ServeCommand
def main():
parser = HfArgumentParser(prog="Transformers CLI tool", usage="transformers-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
# Register commands
ChatCommand.register_subcommand(commands_parser)
ConvertCommand.register_subcommand(commands_parser)
DownloadCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
RunCommand.register_subcommand(commands_parser)
ServeCommand.register_subcommand(commands_parser)
AddNewModelLikeCommand.register_subcommand(commands_parser)
AddFastImageProcessorCommand.register_subcommand(commands_parser)
# Let's go
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
# Run
service = args.func(args)
service.run()
if __name__ == "__main__":
main()
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
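# (Added note, worked example) With auto_scale_lr enabled, the LR is scaled
# linearly against base_batch_size: a run with 4 GPUs x 2 samples/GPU
# (total batch 8) would use lr = 0.02 * 8 / 16 = 0.01.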
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
import os
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import Variable
from keras.api import __version__
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
# END DO NOT EDIT.
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
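# (Added note) Together, __dir__ and __all__ above hide the internal `src`
# and `api` submodules from autocompletion and `from keras import *`, while
# keeping them importable explicitly (e.g. `import keras.api`).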
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
import os
# Import everything from /api/ into keras.
from keras.api import * # noqa: F403
from keras.api import __version__ # Import * ignores names start with "_".
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
import os.path
from pathlib import Path
from typing import Callable, Optional, Union
import numpy as np
import torch
from torchvision.datasets.utils import download_url, verify_str_arg
from torchvision.datasets.vision import VisionDataset
class MovingMNIST(VisionDataset):
"""`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.
split (string, optional): The dataset split, supports ``None`` (default), ``"train"`` and ``"test"``.
If ``split=None``, the full data is returned.
        split_ratio (int, optional): The split ratio of the number of frames. If ``split="train"``, the first
            ``data[:, :split_ratio]`` frames are returned. If ``split="test"``, the last ``data[:, split_ratio:]``
            frames are returned. If ``split=None``, this parameter is ignored and all frames are returned.
        transform (callable, optional): A function/transform that takes in a torch Tensor
            and returns a transformed version. E.g., ``transforms.RandomCrop``
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_URL = "http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy"
def __init__(
self,
root: Union[str, Path],
split: Optional[str] = None,
split_ratio: int = 10,
download: bool = False,
transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transform=transform)
self._base_folder = os.path.join(self.root, self.__class__.__name__)
self._filename = self._URL.split("/")[-1]
if split is not None:
verify_str_arg(split, "split", ("train", "test"))
self.split = split
if not isinstance(split_ratio, int):
raise TypeError(f"`split_ratio` should be an integer, but got {type(split_ratio)}")
elif not (1 <= split_ratio <= 19):
raise ValueError(f"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.")
self.split_ratio = split_ratio
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it.")
data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
if self.split == "train":
data = data[: self.split_ratio]
elif self.split == "test":
data = data[self.split_ratio :]
self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
def __getitem__(self, idx: int) -> torch.Tensor:
"""
Args:
index (int): Index
Returns:
            torch.Tensor: Video frames (torch Tensor[T, C, H, W]), where ``T`` is the number of frames.
"""
data = self.data[idx]
if self.transform is not None:
data = self.transform(data)
return data
def __len__(self) -> int:
return len(self.data)
def _check_exists(self) -> bool:
return os.path.exists(os.path.join(self._base_folder, self._filename))
def download(self) -> None:
if self._check_exists():
return
download_url(
url=self._URL,
root=self._base_folder,
filename=self._filename,
md5="be083ec986bfe91a449d63653c411eb2",
)
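# --- Illustrative usage (added sketch, not part of the original file) ---
# The raw file stores sequences as (20 frames, 10000 clips, 64, 64), so:
#   train_set = MovingMNIST(root="data", split="train", split_ratio=10, download=True)
#   clip = train_set[0]          # Tensor[T, C, H, W] == [10, 1, 64, 64]
#   full_set = MovingMNIST(root="data")   # all 20 frames per clip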
|
import os.path
from typing import Callable, Optional
import numpy as np
import torch
from torchvision.datasets.utils import download_url, verify_str_arg
from torchvision.datasets.vision import VisionDataset
class MovingMNIST(VisionDataset):
"""`MovingMNIST <http://www.cs.toronto.edu/~nitish/unsupervised_video/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``MovingMNIST/mnist_test_seq.npy`` exists.
split (string, optional): The dataset split, supports ``None`` (default), ``"train"`` and ``"test"``.
If ``split=None``, the full data is returned.
        split_ratio (int, optional): The split ratio of the number of frames. If ``split="train"``, the first
            ``data[:, :split_ratio]`` frames are returned. If ``split="test"``, the last ``data[:, split_ratio:]``
            frames are returned. If ``split=None``, this parameter is ignored and all frames are returned.
        transform (callable, optional): A function/transform that takes in a torch Tensor
            and returns a transformed version. E.g., ``transforms.RandomCrop``
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_URL = "http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy"
def __init__(
self,
root: str,
split: Optional[str] = None,
split_ratio: int = 10,
download: bool = False,
transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transform=transform)
self._base_folder = os.path.join(self.root, self.__class__.__name__)
self._filename = self._URL.split("/")[-1]
if split is not None:
verify_str_arg(split, "split", ("train", "test"))
self.split = split
if not isinstance(split_ratio, int):
raise TypeError(f"`split_ratio` should be an integer, but got {type(split_ratio)}")
elif not (1 <= split_ratio <= 19):
raise ValueError(f"`split_ratio` should be `1 <= split_ratio <= 19`, but got {split_ratio} instead.")
self.split_ratio = split_ratio
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it.")
data = torch.from_numpy(np.load(os.path.join(self._base_folder, self._filename)))
if self.split == "train":
data = data[: self.split_ratio]
elif self.split == "test":
data = data[self.split_ratio :]
self.data = data.transpose(0, 1).unsqueeze(2).contiguous()
def __getitem__(self, idx: int) -> torch.Tensor:
"""
Args:
index (int): Index
Returns:
            torch.Tensor: Video frames (torch Tensor[T, C, H, W]), where ``T`` is the number of frames.
"""
data = self.data[idx]
if self.transform is not None:
data = self.transform(data)
return data
def __len__(self) -> int:
return len(self.data)
def _check_exists(self) -> bool:
return os.path.exists(os.path.join(self._base_folder, self._filename))
def download(self) -> None:
if self._check_exists():
return
download_url(
url=self._URL,
root=self._base_folder,
filename=self._filename,
md5="be083ec986bfe91a449d63653c411eb2",
)
|
_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from __future__ import annotations
from typing import Any
from langchain_text_splitters.base import TextSplitter
class NLTKTextSplitter(TextSplitter):
"""Splitting text using NLTK package."""
def __init__(
self,
separator: str = "\n\n",
language: str = "english",
*,
use_span_tokenize: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
self._separator = separator
self._language = language
self._use_span_tokenize = use_span_tokenize
if self._use_span_tokenize and self._separator != "":
msg = "When use_span_tokenize is True, separator should be ''"
raise ValueError(msg)
try:
import nltk
if self._use_span_tokenize:
self._tokenizer = nltk.tokenize._get_punkt_tokenizer(self._language)
else:
self._tokenizer = nltk.tokenize.sent_tokenize
except ImportError:
msg = "NLTK is not installed, please install it with `pip install nltk`."
raise ImportError(msg)
def split_text(self, text: str) -> list[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._use_span_tokenize:
spans = list(self._tokenizer.span_tokenize(text))
splits = []
for i, (start, end) in enumerate(spans):
if i > 0:
prev_end = spans[i - 1][1]
sentence = text[prev_end:start] + text[start:end]
else:
sentence = text[start:end]
splits.append(sentence)
else:
splits = self._tokenizer(text, language=self._language)
return self._merge_splits(splits, self._separator)
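# --- Illustrative usage (added sketch, not part of the original module) ---
# Requires the NLTK sentence-tokenizer data, e.g. nltk.download("punkt").
#   splitter = NLTKTextSplitter(chunk_size=1000, chunk_overlap=0)
#   chunks = splitter.split_text(long_document_text)
# With use_span_tokenize=True (and separator=""), the whitespace between
# consecutive sentences is carried along with each sentence, so chunk
# boundaries preserve the original spacing.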
|
from __future__ import annotations
from typing import Any
from langchain_text_splitters.base import TextSplitter
class NLTKTextSplitter(TextSplitter):
"""Splitting text using NLTK package."""
def __init__(
self,
separator: str = "\n\n",
language: str = "english",
*,
use_span_tokenize: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
self._separator = separator
self._language = language
self._use_span_tokenize = use_span_tokenize
if self._use_span_tokenize and self._separator != "":
raise ValueError("When use_span_tokenize is True, separator should be ''")
try:
import nltk
if self._use_span_tokenize:
self._tokenizer = nltk.tokenize._get_punkt_tokenizer(self._language)
else:
self._tokenizer = nltk.tokenize.sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
def split_text(self, text: str) -> list[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._use_span_tokenize:
spans = list(self._tokenizer.span_tokenize(text))
splits = []
for i, (start, end) in enumerate(spans):
if i > 0:
prev_end = spans[i - 1][1]
sentence = text[prev_end:start] + text[start:end]
else:
sentence = text[start:end]
splits.append(sentence)
else:
splits = self._tokenizer(text, language=self._language)
return self._merge_splits(splits, self._separator)
|
from langchain_anthropic.chat_models import (
ChatAnthropic,
ChatAnthropicMessages,
convert_to_anthropic_tool,
)
from langchain_anthropic.llms import Anthropic, AnthropicLLM
__all__ = [
"ChatAnthropicMessages",
"ChatAnthropic",
"convert_to_anthropic_tool",
"Anthropic",
"AnthropicLLM",
]
|
from langchain_anthropic.chat_models import ChatAnthropic, ChatAnthropicMessages
from langchain_anthropic.llms import Anthropic, AnthropicLLM
__all__ = ["ChatAnthropicMessages", "ChatAnthropic", "Anthropic", "AnthropicLLM"]
|
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
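# (Added note) `{{_base_.backend_args}}` is mmengine config interpolation: at
# parse time it resolves to the `backend_args` variable defined in the
# inherited base config, keeping this pipeline in sync with the base file.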
|
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import os
import shutil
import subprocess
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub', 'export', 'auth', 'cloud', 'ping'}
parser = get_main_parser()
if len(sys.argv) > 1:
from argparse import _StoreAction, _StoreTrueAction
from rich import box
from rich.table import Table
args, unknown = parser.parse_known_args()
if unknown:
from jina.helper import warn_unknown_args
unknown = list(filter(lambda x: x.startswith('--'), unknown))
warn_unknown_args(unknown)
if args.cli not in silent_print and print_args:
from jina import __resources_path__
p = parser._actions[-1].choices[sys.argv[1]]
default_args = {
a.dest: a.default
for a in p._actions
if isinstance(a, (_StoreAction, _StoreTrueAction))
}
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
param_str = Table(
title=' '.join(sys.argv),
box=box.ROUNDED,
highlight=True,
title_justify='left',
)
param_str.add_column('Argument', justify='right')
param_str.add_column('Value', justify='left')
for k, v in sorted(vars(args).items()):
param = k.replace('_', '-')
value = str(v)
if not default_args.get(k, None) == v:
value = f'[b]{value}[/]'
param_str.add_row(param, value)
if 'JINA_LOG_NO_COLOR' not in os.environ:
print(f'\n{logo_str}\n')
console.print(param_str)
return args
else:
parser.print_help()
exit()
def _quick_ac_lookup():
from jina_cli.autocomplete import ac_table
if len(sys.argv) > 1:
if sys.argv[1] == 'commands':
for k in ac_table['commands']:
print(k)
exit()
elif sys.argv[1] == 'completions':
# search with the longest shared prefix
for j in range(len(sys.argv), 2, -1):
_input = ' '.join(sys.argv[2:j]).strip()
if _input in ac_table['completions']:
compl = ac_table['completions'][_input]
for k in compl:
if k not in sys.argv:
print(k)
break
exit()
def _try_plugin_command():
"""Tries to call the CLI of an external Jina project.
    :return: True if the plugin has been found (locally or among the known plugins), False otherwise
"""
argv = sys.argv
if len(argv) < 2: # no command given
return False
from jina_cli.autocomplete import ac_table
if argv[1] in ac_table['commands']: # native command can't be plugin command
return False
def _cmd_exists(cmd):
return shutil.which(cmd) is not None
subcommand = argv[1]
cmd = 'jina-' + subcommand
if _cmd_exists(cmd):
subprocess.run([cmd] + argv[2:])
return True
from jina_cli.known_plugins import plugin_info
if subcommand in plugin_info:
from jina.helper import get_rich_console
cmd_info = plugin_info[subcommand]
project, package = cmd_info['display-name'], cmd_info['pip-package']
console = get_rich_console()
console.print(
f"It seems like [yellow]{project}[/yellow] is not installed in your environment."
f"To use it via the [green]'jina {subcommand}'[/green] command, "
f"install it first: [green]'pip install {package}'[/green]."
)
return True
return False
def main():
"""The main entrypoint of the CLI"""
found_plugin = _try_plugin_command()
if not found_plugin:
_quick_ac_lookup()
from jina_cli import api
args = _get_run_args()
getattr(api, args.cli.replace('-', '_'))(args)
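# --- Illustrative flow (added note, not part of the original file) ---
# For a hypothetical `jina foo`: _try_plugin_command first looks for a
# `jina-foo` executable on PATH and delegates to it; failing that, if "foo"
# is a known plugin it prints a pip install hint; otherwise dispatch falls
# through to the native CLI via _get_run_args and jina_cli.api.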
|
import os
import shutil
import subprocess
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub', 'export', 'auth', 'cloud'}
parser = get_main_parser()
if len(sys.argv) > 1:
from argparse import _StoreAction, _StoreTrueAction
from rich import box
from rich.table import Table
args, unknown = parser.parse_known_args()
if unknown:
from jina.helper import warn_unknown_args
unknown = list(filter(lambda x: x.startswith('--'), unknown))
warn_unknown_args(unknown)
if args.cli not in silent_print and print_args:
from jina import __resources_path__
p = parser._actions[-1].choices[sys.argv[1]]
default_args = {
a.dest: a.default
for a in p._actions
if isinstance(a, (_StoreAction, _StoreTrueAction))
}
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
param_str = Table(
title=' '.join(sys.argv),
box=box.ROUNDED,
highlight=True,
title_justify='left',
)
param_str.add_column('Argument', justify='right')
param_str.add_column('Value', justify='left')
for k, v in sorted(vars(args).items()):
param = k.replace('_', '-')
value = str(v)
if not default_args.get(k, None) == v:
value = f'[b]{value}[/]'
param_str.add_row(param, value)
if 'JINA_LOG_NO_COLOR' not in os.environ:
print(f'\n{logo_str}\n')
console.print(param_str)
return args
else:
parser.print_help()
exit()
def _quick_ac_lookup():
from jina_cli.autocomplete import ac_table
if len(sys.argv) > 1:
if sys.argv[1] == 'commands':
for k in ac_table['commands']:
print(k)
exit()
elif sys.argv[1] == 'completions':
# search with the longest shared prefix
for j in range(len(sys.argv), 2, -1):
_input = ' '.join(sys.argv[2:j]).strip()
if _input in ac_table['completions']:
compl = ac_table['completions'][_input]
for k in compl:
if k not in sys.argv:
print(k)
break
exit()
def _try_plugin_command():
"""Tries to call the CLI of an external Jina project.
    :return: True if the plugin has been found (locally or among the known plugins), False otherwise
"""
argv = sys.argv
if len(argv) < 2: # no command given
return False
from jina_cli.autocomplete import ac_table
if argv[1] in ac_table['commands']: # native command can't be plugin command
return False
def _cmd_exists(cmd):
return shutil.which(cmd) is not None
subcommand = argv[1]
cmd = 'jina-' + subcommand
if _cmd_exists(cmd):
subprocess.run([cmd] + argv[2:])
return True
from jina_cli.known_plugins import plugin_info
if subcommand in plugin_info:
from jina.helper import get_rich_console
cmd_info = plugin_info[subcommand]
project, package = cmd_info['display-name'], cmd_info['pip-package']
console = get_rich_console()
console.print(
f"It seems like [yellow]{project}[/yellow] is not installed in your environment."
f"To use it via the [green]'jina {subcommand}'[/green] command, "
f"install it first: [green]'pip install {package}'[/green]."
)
return True
return False
def main():
"""The main entrypoint of the CLI"""
found_plugin = _try_plugin_command()
if not found_plugin:
_quick_ac_lookup()
from jina_cli import api
args = _get_run_args()
getattr(api, args.cli.replace('-', '_'))(args)
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import OneHotLabel
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "semeion"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(i) for i in range(10)])
@register_dataset(NAME)
class SEMEION(Dataset):
"""Semeion dataset
homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
)
return [data]
def _prepare_sample(self, data: Tuple[str, ...]) -> Dict[str, Any]:
image_data, label_data = data[:256], data[256:-1]
return dict(
image=Image(torch.tensor([float(pixel) for pixel in image_data], dtype=torch.float).reshape(16, 16)),
label=OneHotLabel([int(label) for label in label_data], categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVParser(dp, delimiter=" ")
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 1_593
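# (Added note) Each row of semeion.data is 256 grey-scale pixel values (one
# 16x16 image) followed by a 10-element one-hot digit label, which is why
# _prepare_sample splits at data[:256] / data[256:-1] (the final field of
# each space-delimited row is assumed to be an empty trailing token).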
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import OneHotLabel
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "semeion"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(i) for i in range(10)])
@register_dataset(NAME)
class SEMEION(Dataset):
"""Semeion dataset
homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
)
return [data]
def _prepare_sample(self, data: Tuple[str, ...]) -> Dict[str, Any]:
image_data, label_data = data[:256], data[256:-1]
return dict(
image=Image(torch.tensor([float(pixel) for pixel in image_data], dtype=torch.float).reshape(16, 16)),
label=OneHotLabel([int(label) for label in label_data], categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVParser(dp, delimiter=" ")
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 1_593
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AddNoise,
AmplitudeToDB,
ComputeDeltas,
Convolve,
Deemphasis,
Fade,
FFTConvolve,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Preemphasis,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpectralCentroid,
Spectrogram,
Speed,
SpeedPerturbation,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AddNoise",
"AmplitudeToDB",
"ComputeDeltas",
"Convolve",
"Deemphasis",
"Fade",
"FFTConvolve",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"Preemphasis",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"Speed",
"SpeedPerturbation",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpectralCentroid,
Spectrogram,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
"""
Custom hook to customize the behavior of Hatchling.
Here, we customize the tag of the generated wheels.
"""
from typing import Any, Dict
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
from packaging.tags import platform_tags
def get_tag() -> str:
"""Get appropriate wheel tag according to system"""
platform_tag = next(platform_tags())
return f"py3-none-{platform_tag}"
class CustomBuildHook(BuildHookInterface):
"""A custom build hook"""
def initialize(self, version: str, build_data: Dict[str, Any]) -> None:
"""This step ccurs immediately before each build."""
build_data["tag"] = get_tag()
|
"""
Custom hook to customize the behavior of Hatchling.
Here, we customize the tag of the generated wheels.
"""
import sysconfig
from typing import Any, Dict
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
def get_tag() -> str:
"""Get appropriate wheel tag according to system"""
tag_platform = sysconfig.get_platform().replace("-", "_").replace(".", "_")
return f"py3-none-{tag_platform}"
class CustomBuildHook(BuildHookInterface):
"""A custom build hook"""
def initialize(self, version: str, build_data: Dict[str, Any]) -> None:
"""This step ccurs immediately before each build."""
build_data["tag"] = get_tag()
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv import ConfigDict
from mmcv.cnn import build_plugin_layer
from mmdet.models.plugins import DropBlock
def test_dropblock():
feat = torch.rand(1, 1, 11, 11)
drop_prob = 1.0
dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0)
out_feat = dropblock(feat)
assert (out_feat == 0).all() and out_feat.shape == feat.shape
drop_prob = 0.5
dropblock = DropBlock(drop_prob, block_size=5, warmup_iters=0)
out_feat = dropblock(feat)
assert out_feat.shape == feat.shape
# drop_prob must be (0,1]
with pytest.raises(AssertionError):
DropBlock(1.5, 3)
# block_size cannot be an even number
with pytest.raises(AssertionError):
DropBlock(0.5, 2)
# warmup_iters cannot be less than 0
with pytest.raises(AssertionError):
DropBlock(0.5, 3, -1)
def test_pixeldecoder():
base_channels = 64
pixel_decoder_cfg = ConfigDict(
dict(
type='PixelDecoder',
in_channels=[base_channels * 2**i for i in range(4)],
feat_channels=base_channels,
out_channels=base_channels,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU')))
self = build_plugin_layer(pixel_decoder_cfg)[1]
img_metas = [{}, {}]
feats = [
torch.rand((2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
for i in range(4)
]
mask_feature, memory = self(feats, img_metas)
assert (memory == feats[-1]).all()
assert mask_feature.shape == feats[0].shape
def test_transformerencoderpixeldecoder():
base_channels = 64
pixel_decoder_cfg = ConfigDict(
dict(
type='TransformerEncoderPixelDecoder',
in_channels=[base_channels * 2**i for i in range(4)],
feat_channels=base_channels,
out_channels=base_channels,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=base_channels,
num_heads=8,
attn_drop=0.1,
proj_drop=0.1,
dropout_layer=None,
batch_first=False),
ffn_cfgs=dict(
embed_dims=base_channels,
feedforward_channels=base_channels * 8,
num_fcs=2,
act_cfg=dict(type='ReLU', inplace=True),
ffn_drop=0.1,
dropout_layer=None,
add_identity=True),
operation_order=('self_attn', 'norm', 'ffn', 'norm'),
norm_cfg=dict(type='LN'),
init_cfg=None,
batch_first=False),
init_cfg=None),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=base_channels // 2,
normalize=True)))
self = build_plugin_layer(pixel_decoder_cfg)[1]
img_metas = [{
'batch_input_shape': (128, 160),
'img_shape': (120, 160, 3),
}, {
'batch_input_shape': (128, 160),
'img_shape': (125, 160, 3),
}]
feats = [
torch.rand((2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
for i in range(4)
]
mask_feature, memory = self(feats, img_metas)
assert memory.shape[-2:] == feats[-1].shape[-2:]
assert mask_feature.shape == feats[0].shape
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.plugins import DropBlock
def test_dropblock():
feat = torch.rand(1, 1, 11, 11)
drop_prob = 1.0
dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0)
out_feat = dropblock(feat)
assert (out_feat == 0).all() and out_feat.shape == feat.shape
drop_prob = 0.5
dropblock = DropBlock(drop_prob, block_size=5, warmup_iters=0)
out_feat = dropblock(feat)
assert out_feat.shape == feat.shape
# drop_prob must be (0,1]
with pytest.raises(AssertionError):
DropBlock(1.5, 3)
# block_size cannot be an even number
with pytest.raises(AssertionError):
DropBlock(0.5, 2)
# warmup_iters cannot be less than 0
with pytest.raises(AssertionError):
DropBlock(0.5, 3, -1)
|
_base_ = './libra-faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture(scope="session")
def _stsb_bert_tiny_model() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def stsb_bert_tiny_model(_stsb_bert_tiny_model: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_stsb_bert_tiny_model)
@pytest.fixture(scope="session")
def _avg_word_embeddings_levy() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers/average_word_embeddings_levy_dependency")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def avg_word_embeddings_levy(_avg_word_embeddings_levy: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_avg_word_embeddings_levy)
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding_model(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
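# --- Illustrative usage (added note, not part of the original file) ---
# pytest injects these fixtures by parameter name, e.g.:
#   def test_encode(stsb_bert_tiny_model: SentenceTransformer) -> None:
#       emb = stsb_bert_tiny_model.encode(["hello world"])
#       assert emb.shape == (1, stsb_bert_tiny_model.get_sentence_embedding_dimension())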
|
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture(scope="session")
def _stsb_bert_tiny_model() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def stsb_bert_tiny_model(_stsb_bert_tiny_model: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_stsb_bert_tiny_model)
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding_model(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
"""Filter that uses an LLM to drop documents that aren't relevant to the query."""
from collections.abc import Sequence
from typing import Any, Callable, Optional
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.runnables.config import RunnableConfig
from pydantic import ConfigDict
from langchain.chains import LLMChain
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: Runnable
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
config = RunnableConfig(callbacks=callbacks)
outputs = zip(
self.llm_chain.batch(
[self.get_input(query, doc) for doc in documents], config=config
),
documents,
)
for output_, doc in outputs:
include_doc = None
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
include_doc = self.llm_chain.prompt.output_parser.parse(output)
else:
if isinstance(output_, bool):
include_doc = output_
if include_doc:
filtered_docs.append(doc)
return filtered_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
config = RunnableConfig(callbacks=callbacks)
outputs = zip(
await self.llm_chain.abatch(
[self.get_input(query, doc) for doc in documents], config=config
),
documents,
)
for output_, doc in outputs:
include_doc = None
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
include_doc = self.llm_chain.prompt.output_parser.parse(output)
else:
if isinstance(output_, bool):
include_doc = output_
if include_doc:
filtered_docs.append(doc)
return filtered_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> "LLMChainFilter":
"""Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMChainFilter that uses the given language model.
"""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, **kwargs)
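# --- Illustrative usage (added sketch, not part of the original module) ---
# The chat model below is an assumption for the example; any BaseLanguageModel
# works with from_llm.
#   from langchain_openai import ChatOpenAI
#   doc_filter = LLMChainFilter.from_llm(ChatOpenAI(model="gpt-4o-mini"))
#   relevant_docs = doc_filter.compress_documents(docs, query="What is RAG?")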
|
"""Filter that uses an LLM to drop documents that aren't relevant to the query."""
from typing import Any, Callable, Dict, Optional, Sequence
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.runnables.config import RunnableConfig
from pydantic import ConfigDict
from langchain.chains import LLMChain
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: Runnable
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
config = RunnableConfig(callbacks=callbacks)
outputs = zip(
self.llm_chain.batch(
[self.get_input(query, doc) for doc in documents], config=config
),
documents,
)
for output_, doc in outputs:
include_doc = None
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
include_doc = self.llm_chain.prompt.output_parser.parse(output)
else:
if isinstance(output_, bool):
include_doc = output_
if include_doc:
filtered_docs.append(doc)
return filtered_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
config = RunnableConfig(callbacks=callbacks)
outputs = zip(
await self.llm_chain.abatch(
[self.get_input(query, doc) for doc in documents], config=config
),
documents,
)
for output_, doc in outputs:
include_doc = None
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
include_doc = self.llm_chain.prompt.output_parser.parse(output)
else:
if isinstance(output_, bool):
include_doc = output_
if include_doc:
filtered_docs.append(doc)
return filtered_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> "LLMChainFilter":
"""Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMChainFilter that uses the given language model.
"""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, **kwargs)
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
    Feed-forward layer with an activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
        activation_function: PyTorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
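# Minimal usage sketch: project a batch of 768-dim sentence embeddings down to
# 256 dims through the Tanh-activated linear layer defined above.
if __name__ == "__main__":
    dense = Dense(in_features=768, out_features=256)
    features = {"sentence_embedding": torch.randn(8, 768)}
    out = dense(features)
    assert out["sentence_embedding"].shape == (8, 256)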
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
    Feed-forward layer with an activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
        activation_function: PyTorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return "Dense({})".format(self.get_config_dict())
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields import ModelField
MESH_FILE_FORMATS = ('obj', 'glb', 'ply')
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh or point cloud information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_mesh_extension = any(url.endswith(ext) for ext in MESH_FILE_FORMATS)
if not has_mesh_extension:
raise ValueError(
f'{cls.__name__} must have one of the following extensions:'
f'{MESH_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def _load_trimesh_instance(
self: T,
force: Optional[str] = None,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
For 'scene' try to coerce everything into a scene.
        :param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
import trimesh
if not trimesh_args:
trimesh_args = {}
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
return mesh
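# Hedged usage sketch: Url3D itself is abstract; docarray's concrete
# subclasses (e.g. Mesh3DUrl) build on it. The URL below is a placeholder,
# so the actual load (which needs network access and `trimesh`) stays
# commented out.
if __name__ == "__main__":
    from pydantic import parse_obj_as

    from docarray.typing import Mesh3DUrl

    url = parse_obj_as(Mesh3DUrl, 'https://example.com/mesh.glb')
    # mesh = url._load_trimesh_instance(force='mesh')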
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields import ModelField
MESH_FILE_FORMATS = ('obj', 'glb', 'ply')
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh or point cloud information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_mesh_extension = any(url.endswith(ext) for ext in MESH_FILE_FORMATS)
if not has_mesh_extension:
raise ValueError(
f'{cls.__name__} must have one of the following extensions:'
f'{MESH_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def _load_trimesh_instance(
self: T,
force: Optional[str] = None,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
For 'scene' try to coerce everything into a scene.
        :param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
import trimesh
if not trimesh_args:
trimesh_args = {}
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
return mesh
|
import logging
import os
import sys
from torchaudio._internal.module_utils import eval_env, fail_with_message, is_module_available, no_op
try:
from .fb import _init_ffmpeg
except ImportError:
from .utils import _init_ffmpeg
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _fail_since_no_sox, _init_dll_path, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_EXT",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Initialize libsox-related features
_SOX_INITIALIZED = False
_USE_SOX = False if os.name == "nt" else eval_env("TORCHAUDIO_USE_SOX", True)
_SOX_MODULE_AVAILABLE = is_module_available("torchaudio.lib._torchaudio_sox")
if _USE_SOX and _SOX_MODULE_AVAILABLE:
try:
_init_sox()
_SOX_INITIALIZED = True
except Exception:
# The initialization of sox extension will fail if supported sox
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize sox extension", exc_info=True)
if os.name == "nt":
fail_if_no_sox = fail_with_message("requires sox extension, which is not supported on Windows.")
elif not _USE_SOX:
fail_if_no_sox = fail_with_message("requires sox extension, but it is disabled. (TORCHAUDIO_USE_SOX=0)")
elif not _SOX_MODULE_AVAILABLE:
fail_if_no_sox = fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. "
"Please build TorchAudio with libsox support. (BUILD_SOX=1)"
)
else:
fail_if_no_sox = no_op if _SOX_INITIALIZED else _fail_since_no_sox
# Initialize FFmpeg-related features
_FFMPEG_EXT = None
_USE_FFMPEG = eval_env("TORCHAUDIO_USE_FFMPEG", True)
if _USE_FFMPEG and _IS_TORCHAUDIO_EXT_AVAILABLE:
try:
_FFMPEG_EXT = _init_ffmpeg()
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
if _USE_FFMPEG:
fail_if_no_ffmpeg = _fail_since_no_ffmpeg if _FFMPEG_EXT is None else no_op
else:
fail_if_no_ffmpeg = fail_with_message("requires ffmpeg extension, but it is disabled. (TORCHAUDIO_USE_FFMPEG=0)")
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
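# Hedged usage sketch: each `fail_if_no_*` name resolves to either `no_op` or
# a raising wrapper, so downstream modules apply them as decorators. The
# function below is purely illustrative, not part of torchaudio's API.
@fail_if_no_sox
def _example_sox_entry_point():
    """Raises a descriptive RuntimeError if the sox extension is missing."""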
|
import logging
import os
import sys
from torchaudio._internal.module_utils import eval_env, fail_with_message, is_module_available, no_op
try:
from .fb import _init_ffmpeg
except ImportError:
from .utils import _init_ffmpeg
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_EXT",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
_USE_SOX = False if os.name == "nt" else eval_env("TORCHAUDIO_USE_SOX", True)
_SOX_MODULE_AVAILABLE = is_module_available("torchaudio.lib._torchaudio_sox")
if _USE_SOX and _SOX_MODULE_AVAILABLE:
_init_sox()
_SOX_INITIALIZED = True
if os.name == "nt":
fail_if_no_sox = fail_with_message("requires sox extension, which is not supported on Windows.")
elif not _USE_SOX:
fail_if_no_sox = fail_with_message("requires sox extension, but it is disabled. (TORCHAUDIO_USE_SOX=0)")
elif not _SOX_MODULE_AVAILABLE:
fail_if_no_sox = fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. "
"Please build TorchAudio with libsox support. (BUILD_SOX=1)"
)
else:
fail_if_no_sox = no_op
# Initialize FFmpeg-related features
_FFMPEG_EXT = None
_USE_FFMPEG = eval_env("TORCHAUDIO_USE_FFMPEG", True)
if _USE_FFMPEG and _IS_TORCHAUDIO_EXT_AVAILABLE:
try:
_FFMPEG_EXT = _init_ffmpeg()
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
if _USE_FFMPEG:
fail_if_no_ffmpeg = _fail_since_no_ffmpeg if _FFMPEG_EXT is None else no_op
else:
fail_if_no_ffmpeg = fail_with_message("requires ffmpeg extension, but it is disabled. (TORCHAUDIO_USE_FFMPEG=0)")
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINValueOps
from langchain_community.tools.ainetwork.value import ValueSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ValueSchema": "langchain_community.tools.ainetwork.value",
"AINValueOps": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AINValueOps",
"ValueSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINValueOps
from langchain_community.tools.ainetwork.value import ValueSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ValueSchema": "langchain_community.tools.ainetwork.value",
"AINValueOps": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ValueSchema",
"AINValueOps",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DynamicConv, PatchEmbed, PatchMerging,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
from .transformer import (MLP, DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DynamicConv, PatchEmbed, PatchMerging,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer'
]
|
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/training/overview.html
"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {"return_loss": True}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
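# Hedged usage sketch: a Hugging Face tokenizer stands in for the model's own
# tokenize_fn here (downloading "bert-base-uncased" needs network access).
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = SentenceTransformerDataCollator(
        tokenize_fn=lambda texts: tok(texts, padding=True, return_tensors="pt")
    )
    batch = collator(
        [
            {"sentence1": "A cat sits.", "sentence2": "A feline rests.", "score": 0.9},
            {"sentence1": "Hello there.", "sentence2": "Goodbye.", "score": 0.1},
        ]
    )
    # Yields "sentence1_input_ids", "sentence2_attention_mask", ..., plus
    # "label" (from the "score" column) and "return_loss".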
|
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/training/overview.html"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {"return_loss": True}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...image_tf_encoder import ImageTFEncoder
input_dim = 336
target_output_dim = 1280
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_name == 'MobileNetV2'
def test_encoding_results():
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = ImageTFEncoder()
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
def test_image_results(test_images: Dict[str, np.array]):
embeddings = {}
encoder = ImageTFEncoder()
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (target_output_dim,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
@pytest.mark.gpu
def test_image_results_gpu(test_images: Dict[str, np.array]):
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = ImageTFEncoder(device='/GPU:0')
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict
import numpy as np
from jina import DocumentArray, Document, Executor
from ...image_tf_encoder import ImageTFEncoder
input_dim = 336
target_output_dim = 1280
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_name == 'MobileNetV2'
def test_encoding_results():
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = ImageTFEncoder()
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
def test_image_results(test_images: Dict[str, np.array]):
embeddings = {}
encoder = ImageTFEncoder()
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (target_output_dim,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
# dataset settings
dataset_type = 'MOTChallengeDataset'
data_root = 'data/MOT17/'
resized_shape = (1088, 1088)
# data pipeline
train_pipeline = [
dict(
type='UniformRefFrameSample',
num_ref_imgs=1,
frame_range=10,
filter_key_img=True),
dict(
type='TransformBroadcaster',
share_random_params=True,
transforms=[
dict(type='LoadImageFromFile'),
dict(type='LoadTrackAnnotations'),
dict(
type='RandomResize',
scale=resized_shape,
ratio_range=(0.8, 1.2),
keep_ratio=True,
clip_object_border=False),
dict(type='PhotoMetricDistortion')
]),
dict(
type='TransformBroadcaster',
# different cropped positions for different frames
share_random_params=False,
transforms=[
dict(
type='RandomCrop',
crop_size=resized_shape,
bbox_clip_border=False)
]),
dict(
type='TransformBroadcaster',
share_random_params=True,
transforms=[
dict(type='RandomFlip', prob=0.5),
]),
dict(type='PackTrackInputs')
]
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=resized_shape, keep_ratio=True),
dict(type='LoadTrackAnnotations')
]),
dict(type='PackTrackInputs')
]
# dataloader
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
# MOTChallengeDataset is a video-based dataset, so we don't need
# "AspectRatioBatchSampler"
# batch_sampler=dict(type='AspectRatioBatchSampler'),
sampler=dict(type='TrackImgSampler'), # image-based sampling
dataset=dict(
type=dataset_type,
data_root=data_root,
visibility_thr=-1,
ann_file='annotations/half-train_cocoformat.json',
data_prefix=dict(img_path='train'),
metainfo=dict(classes=('pedestrian', )),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='TrackImgSampler'), # image-based sampling
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img_path='train'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# evaluator
val_evaluator = dict(
type='MOTChallengeMetric', metric=['HOTA', 'CLEAR', 'Identity'])
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'MOTChallengeDataset'
data_root = 'data/MOT17/'
resized_shape = (1088, 1088)
# data pipeline
train_pipeline = [
dict(
type='UniformSample',
num_ref_imgs=1,
frame_range=10,
filter_key_img=True),
dict(
type='TransformBroadcaster',
share_random_params=True,
transforms=[
dict(type='LoadImageFromFile'),
dict(type='LoadTrackAnnotations'),
dict(
type='RandomResize',
scale=resized_shape,
ratio_range=(0.8, 1.2),
keep_ratio=True,
clip_object_border=False),
dict(type='PhotoMetricDistortion')
]),
dict(
type='TransformBroadcaster',
        # different cropped positions for different frames
share_random_params=False,
transforms=[
dict(
type='RandomCrop',
crop_size=resized_shape,
bbox_clip_border=False)
]),
dict(
type='TransformBroadcaster',
share_random_params=True,
transforms=[
dict(type='RandomFlip', prob=0.5),
]),
dict(type='PackTrackInputs')
]
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=resized_shape, keep_ratio=True),
dict(type='LoadTrackAnnotations')
]),
dict(type='PackTrackInputs')
]
# dataloader
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
# MOTChallengeDataset is a video-based dataset, so we don't need
# "AspectRatioBatchSampler"
# batch_sampler=dict(type='AspectRatioBatchSampler'),
sampler=dict(type='TrackImgSampler'), # image-based sampling
dataset=dict(
type=dataset_type,
data_root=data_root,
visibility_thr=-1,
ann_file='annotations/half-train_cocoformat.json',
data_prefix=dict(img_path='train'),
metainfo=dict(classes=('pedestrian', )),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='TrackImgSampler'), # image-based sampling
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img_path='train'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# evaluator
val_evaluator = dict(
type='MOTChallengeMetric', metric=['HOTA', 'CLEAR', 'Identity'])
test_evaluator = val_evaluator
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.12.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
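# Hedged smoke test of the re-exported public API (needs network access on
# first run; "imdb" is just a familiar example dataset, nothing special).
if __name__ == "__main__":
    ds = load_dataset("imdb", split="train[:10]")
    print(ds.features)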
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.12.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
|
"""Test embeddings base module."""
import pytest
from langchain.embeddings.base import (
_SUPPORTED_PROVIDERS,
_infer_model_and_provider,
_parse_model_string,
)
def test_parse_model_string() -> None:
"""Test parsing model strings into provider and model components."""
assert _parse_model_string("openai:text-embedding-3-small") == (
"openai",
"text-embedding-3-small",
)
assert _parse_model_string("bedrock:amazon.titan-embed-text-v1") == (
"bedrock",
"amazon.titan-embed-text-v1",
)
assert _parse_model_string("huggingface:BAAI/bge-base-en:v1.5") == (
"huggingface",
"BAAI/bge-base-en:v1.5",
)
def test_parse_model_string_errors() -> None:
"""Test error cases for model string parsing."""
with pytest.raises(ValueError, match="Model name must be"):
_parse_model_string("just-a-model-name")
with pytest.raises(ValueError, match="Invalid model format "):
_parse_model_string("")
with pytest.raises(ValueError, match="is not supported"):
_parse_model_string(":model-name")
with pytest.raises(ValueError, match="Model name cannot be empty"):
_parse_model_string("openai:")
with pytest.raises(
ValueError,
match="Provider 'invalid-provider' is not supported",
):
_parse_model_string("invalid-provider:model-name")
for provider in _SUPPORTED_PROVIDERS:
with pytest.raises(ValueError, match=f"{provider}"):
_parse_model_string("invalid-provider:model-name")
def test_infer_model_and_provider() -> None:
"""Test model and provider inference from different input formats."""
assert _infer_model_and_provider("openai:text-embedding-3-small") == (
"openai",
"text-embedding-3-small",
)
assert _infer_model_and_provider(
model="text-embedding-3-small",
provider="openai",
) == ("openai", "text-embedding-3-small")
assert _infer_model_and_provider(
model="ft:text-embedding-3-small",
provider="openai",
) == ("openai", "ft:text-embedding-3-small")
assert _infer_model_and_provider(model="openai:ft:text-embedding-3-small") == (
"openai",
"ft:text-embedding-3-small",
)
def test_infer_model_and_provider_errors() -> None:
"""Test error cases for model and provider inference."""
# Test missing provider
with pytest.raises(ValueError, match="Must specify either"):
_infer_model_and_provider("text-embedding-3-small")
# Test empty model
with pytest.raises(ValueError, match="Model name cannot be empty"):
_infer_model_and_provider("")
# Test empty provider with model
with pytest.raises(ValueError, match="Must specify either"):
_infer_model_and_provider("model", provider="")
# Test invalid provider
with pytest.raises(ValueError, match="is not supported"):
_infer_model_and_provider("model", provider="invalid")
# Test provider list is in error
with pytest.raises(ValueError) as exc:
_infer_model_and_provider("model", provider="invalid")
for provider in _SUPPORTED_PROVIDERS:
assert provider in str(exc.value)
@pytest.mark.parametrize(
"provider",
sorted(_SUPPORTED_PROVIDERS.keys()),
)
def test_supported_providers_package_names(provider: str) -> None:
"""Test that all supported providers have valid package names."""
package = _SUPPORTED_PROVIDERS[provider]
assert "-" not in package
assert package.startswith("langchain_")
assert package.islower()
def test_is_sorted() -> None:
assert list(_SUPPORTED_PROVIDERS) == sorted(_SUPPORTED_PROVIDERS.keys())
|
"""Test embeddings base module."""
import pytest
from langchain.embeddings.base import (
_SUPPORTED_PROVIDERS,
_infer_model_and_provider,
_parse_model_string,
)
def test_parse_model_string() -> None:
"""Test parsing model strings into provider and model components."""
assert _parse_model_string("openai:text-embedding-3-small") == (
"openai",
"text-embedding-3-small",
)
assert _parse_model_string("bedrock:amazon.titan-embed-text-v1") == (
"bedrock",
"amazon.titan-embed-text-v1",
)
assert _parse_model_string("huggingface:BAAI/bge-base-en:v1.5") == (
"huggingface",
"BAAI/bge-base-en:v1.5",
)
def test_parse_model_string_errors() -> None:
"""Test error cases for model string parsing."""
with pytest.raises(ValueError, match="Model name must be"):
_parse_model_string("just-a-model-name")
with pytest.raises(ValueError, match="Invalid model format "):
_parse_model_string("")
with pytest.raises(ValueError, match="is not supported"):
_parse_model_string(":model-name")
with pytest.raises(ValueError, match="Model name cannot be empty"):
_parse_model_string("openai:")
with pytest.raises(
ValueError, match="Provider 'invalid-provider' is not supported"
):
_parse_model_string("invalid-provider:model-name")
for provider in _SUPPORTED_PROVIDERS:
with pytest.raises(ValueError, match=f"{provider}"):
_parse_model_string("invalid-provider:model-name")
def test_infer_model_and_provider() -> None:
"""Test model and provider inference from different input formats."""
assert _infer_model_and_provider("openai:text-embedding-3-small") == (
"openai",
"text-embedding-3-small",
)
assert _infer_model_and_provider(
model="text-embedding-3-small", provider="openai"
) == ("openai", "text-embedding-3-small")
assert _infer_model_and_provider(
model="ft:text-embedding-3-small", provider="openai"
) == ("openai", "ft:text-embedding-3-small")
assert _infer_model_and_provider(model="openai:ft:text-embedding-3-small") == (
"openai",
"ft:text-embedding-3-small",
)
def test_infer_model_and_provider_errors() -> None:
"""Test error cases for model and provider inference."""
# Test missing provider
with pytest.raises(ValueError, match="Must specify either"):
_infer_model_and_provider("text-embedding-3-small")
# Test empty model
with pytest.raises(ValueError, match="Model name cannot be empty"):
_infer_model_and_provider("")
# Test empty provider with model
with pytest.raises(ValueError, match="Must specify either"):
_infer_model_and_provider("model", provider="")
# Test invalid provider
with pytest.raises(ValueError, match="is not supported"):
_infer_model_and_provider("model", provider="invalid")
# Test provider list is in error
with pytest.raises(ValueError) as exc:
_infer_model_and_provider("model", provider="invalid")
for provider in _SUPPORTED_PROVIDERS:
assert provider in str(exc.value)
@pytest.mark.parametrize(
"provider",
sorted(_SUPPORTED_PROVIDERS.keys()),
)
def test_supported_providers_package_names(provider: str) -> None:
"""Test that all supported providers have valid package names."""
package = _SUPPORTED_PROVIDERS[provider]
assert "-" not in package
assert package.startswith("langchain_")
assert package.islower()
def test_is_sorted() -> None:
assert list(_SUPPORTED_PROVIDERS) == sorted(_SUPPORTED_PROVIDERS.keys())
|
import os
from . import InputExample
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers
"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
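# Hedged usage sketch: "labels.tsv" is a hypothetical tab-separated file of
# "<label>\t<sentence>" rows inside a local "data" folder.
if __name__ == "__main__":
    reader = LabelSentenceReader("data")
    examples = reader.get_examples("labels.tsv", max_examples=1000)
    print(examples[0].texts, examples[0].label)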
|
from . import InputExample
import csv
import gzip
import os
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator='\t'):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from docarray import BaseDoc
from docarray.array.doc_vec.doc_vec import DocVec
from docarray.typing import AnyTensor, NdArray
def test_da_init():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
assert (da._storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
assert da._storage.any_columns['name'] == ['hello' for _ in range(4)]
def test_da_iter():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=i * np.zeros((10, 10)), name=f'hello{i}') for i in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
for i, doc in enumerate(da):
assert isinstance(doc, MyDoc)
assert (doc.tensor == i * np.zeros((10, 10))).all()
assert doc.name == f'hello{i}'
|
import numpy as np
from docarray import BaseDoc
from docarray.array.doc_vec.doc_vec import DocVec
from docarray.typing import AnyTensor, NdArray
def test_da_init():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
assert (da._storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
assert da._storage.any_columns['name'] == ['hello' for _ in range(4)]
def test_da_iter():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=i * np.zeros((10, 10)), name=f'hello{i}') for i in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
for i, doc in enumerate(da):
assert isinstance(doc, MyDoc)
assert (doc.tensor == i * np.zeros((10, 10))).all()
assert doc.name == f'hello{i}'
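# A follow-on sketch of DocVec's columnar storage, assuming the same _storage
# layout the asserts above rely on: every value of a tensor field is kept in
# one contiguous column, so no per-document loop is needed to read it.
def test_da_stacked_column_sketch():
    class SketchDoc(BaseDoc):
        tensor: AnyTensor
    docs = [SketchDoc(tensor=np.ones(10)) for _ in range(3)]
    da = DocVec[SketchDoc](docs, tensor_type=NdArray)
    assert da._storage.tensor_columns['tensor'].shape == (3, 10)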
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import VGG
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from ..necks import ssd_neck
@MODELS.register_module()
class SSDVGG(VGG, BaseModule):
"""VGG Backbone network for single-shot-detection.
Args:
depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the end
            of the model.
ceil_mode (bool): When True, will use `ceil` instead of `floor`
to compute the output shape.
out_indices (Sequence[int]): Output from which stages.
out_feature_indices (Sequence[int]): Output from which feature map.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
        input_size (int, optional): Deprecated argument.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
            L2 normalization layer init scale.
Example:
>>> self = SSDVGG(input_size=300, depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
pretrained=None,
init_cfg=None,
input_size=None,
l2_norm_scale=None):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if init_cfg is not None:
self.init_cfg = init_cfg
elif isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
dict(type='Normal', std=0.01, layer='Linear'),
]
else:
raise TypeError('pretrained must be a str or None')
if input_size is not None:
warnings.warn('DeprecationWarning: input_size is deprecated')
if l2_norm_scale is not None:
warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
'deprecated, it has been moved to SSDNeck.')
def init_weights(self, pretrained=None):
super(VGG, self).init_weights()
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
def __init__(self, **kwargs):
super(L2Norm, self).__init__(**kwargs)
warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
'is deprecated, please use L2Norm in '
'mmdet/models/necks/ssd_neck.py instead')
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import VGG
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
from ..necks import ssd_neck
@MODELS.register_module()
class SSDVGG(VGG, BaseModule):
"""VGG Backbone network for single-shot-detection.
Args:
depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the end
            of the model.
ceil_mode (bool): When True, will use `ceil` instead of `floor`
to compute the output shape.
out_indices (Sequence[int]): Output from which stages.
out_feature_indices (Sequence[int]): Output from which feature map.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
        input_size (int, optional): Deprecated argument.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
            L2 normalization layer init scale.
Example:
>>> self = SSDVGG(input_size=300, depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
pretrained=None,
init_cfg=None,
input_size=None,
l2_norm_scale=None):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if init_cfg is not None:
self.init_cfg = init_cfg
elif isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
dict(type='Normal', std=0.01, layer='Linear'),
]
else:
raise TypeError('pretrained must be a str or None')
if input_size is not None:
warnings.warn('DeprecationWarning: input_size is deprecated')
if l2_norm_scale is not None:
warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
'deprecated, it has been moved to SSDNeck.')
def init_weights(self, pretrained=None):
super(VGG, self).init_weights()
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
def __init__(self, **kwargs):
super(L2Norm, self).__init__(**kwargs)
warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
'is deprecated, please use L2Norm in '
'mmdet/models/necks/ssd_neck.py instead')
|
"""Test EdenAi's object detection Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai import EdenAiObjectDetectionTool
def test_edenai_call() -> None:
"""Test simple call to edenai's object detection endpoint."""
object_detection = EdenAiObjectDetectionTool(providers=["google"])
output = object_detection.invoke("https://static.javatpoint.com/images/objects.jpg")
assert object_detection.name == "edenai_object_detection"
assert object_detection.feature == "image"
assert object_detection.subfeature == "object_detection"
assert isinstance(output, str)
|
"""Test EdenAi's object detection Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai import EdenAiObjectDetectionTool
def test_edenai_call() -> None:
"""Test simple call to edenai's object detection endpoint."""
object_detection = EdenAiObjectDetectionTool(providers=["google"]) # type: ignore[call-arg]
output = object_detection.invoke("https://static.javatpoint.com/images/objects.jpg")
assert object_detection.name == "edenai_object_detection"
assert object_detection.feature == "image"
assert object_detection.subfeature == "object_detection"
assert isinstance(output, str)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the EXPORT must be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.25.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
    with parallel-executing plot generators against the Ubuntu default ulimit -n of 1024 or
    OS X El Capitan's 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the EXPORT must be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.24.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
    with parallel-executing plot generators against the Ubuntu default ulimit -n of 1024 or
    OS X El Capitan's 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug'
]
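# A short sketch of how these transforms are typically chained by registry
# type name in a training pipeline (the scales, ratios, and keys below are
# illustrative values, not a shipped mmdet config):
# train_pipeline = [
#     dict(type='LoadImageFromFile'),
#     dict(type='LoadAnnotations', with_bbox=True),
#     dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
#     dict(type='RandomFlip', flip_ratio=0.5),
#     dict(type='Normalize', mean=[123.675, 116.28, 103.53],
#          std=[58.395, 57.12, 57.375], to_rgb=True),
#     dict(type='Pad', size_divisor=32),
#     dict(type='DefaultFormatBundle'),
#     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
# ]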
|
from llama_index.core.instrumentation.events.base import BaseEvent
class StreamChatStartEvent(BaseEvent):
"""
StreamChatStartEvent.
Fired at the start of writing to the stream chat-engine queue.
"""
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatStartEvent"
class StreamChatEndEvent(BaseEvent):
"""
StreamChatEndEvent.
Fired at the end of writing to the stream chat-engine queue.
"""
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatEndEvent"
class StreamChatErrorEvent(BaseEvent):
"""
StreamChatErrorEvent.
Fired when an exception is raised during the stream chat-engine operation.
Args:
exception (Exception): Exception raised during the stream chat operation.
"""
exception: Exception
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatErrorEvent"
class StreamChatDeltaReceivedEvent(BaseEvent):
"""
StreamChatDeltaReceivedEvent.
Args:
delta (str): Delta received from the stream chat.
"""
delta: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatDeltaReceivedEvent"
|
from llama_index.core.instrumentation.events.base import BaseEvent
class StreamChatStartEvent(BaseEvent):
"""StreamChatStartEvent.
Fired at the start of writing to the stream chat-engine queue.
"""
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatStartEvent"
class StreamChatEndEvent(BaseEvent):
"""StreamChatEndEvent.
Fired at the end of writing to the stream chat-engine queue.
"""
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatEndEvent"
class StreamChatErrorEvent(BaseEvent):
"""StreamChatErrorEvent.
Fired when an exception is raised during the stream chat-engine operation.
Args:
exception (Exception): Exception raised during the stream chat operation.
"""
exception: Exception
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatErrorEvent"
class StreamChatDeltaReceivedEvent(BaseEvent):
"""StreamChatDeltaReceivedEvent.
Args:
delta (str): Delta received from the stream chat.
"""
delta: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatDeltaReceivedEvent"
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
"Hypothesis",
"CTCDecoder",
"ctc_decoder",
"lexicon_decoder",
"download_pretrained_files",
]
def _init_extension():
import torchaudio
torchaudio._extension._load_lib("libtorchaudio_decoder")
global _INITIALIZED
_INITIALIZED = True
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not _INITIALIZED:
_init_extension()
try:
from . import _ctc_decoder
except AttributeError as err:
raise RuntimeError(
"CTC decoder requires the decoder extension. Please set BUILD_CTC_DECODER=1 when building from source."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
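# The module above lazy-loads its decoder symbols through PEP 562's
# module-level __getattr__. A self-contained sketch of the same pattern, with
# a hypothetical heavy submodule (names are illustrative, not torchaudio's):
#
# _LAZY = {"heavy_fn"}
#
# def __getattr__(name):
#     if name in _LAZY:
#         from . import _heavy_impl      # import deferred to first access
#         item = getattr(_heavy_impl, name)
#         globals()[name] = item         # cache so __getattr__ runs only once
#         return item
#     raise AttributeError(f"module {__name__} has no attribute {name}")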
|
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_decoder")
from .ctc_decoder import Hypothesis, CTCDecoder, ctc_decoder, lexicon_decoder, download_pretrained_files
except ImportError as err:
raise ImportError(
"flashlight decoder bindings are required to use this functionality. "
"Please set BUILD_CTC_DECODER=1 when building from source."
) from err
__all__ = [
"Hypothesis",
"CTCDecoder",
"ctc_decoder",
"lexicon_decoder",
"download_pretrained_files",
]
|
"""Init file."""
from llama_index.readers.papers.arxiv.base import ArxivReader
from llama_index.readers.papers.pubmed.base import PubmedReader
__all__ = ["ArxivReader", "PubmedReader"]
|
"""Init file."""
from llama_index.readers.papers.arxiv.base import ArxivReader
from llama_index.readers.papers.pubmed.base import PubmedReader
__all__ = ["ArxivReader", "PubmedReader"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmengine.model import ModuleList
from torch import Tensor
from mmdet.core.utils import MultiConfig
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsampling it.
Args:
num_convs (int): Number of conv layers in the head. Defaults to 0.
num_fcs (int): Number of fc layers in the head. Defaults to 2.
fc_out_channels (int): Number of output channels of fc layer.
Defaults to 1024.
downsample_factor (int): The factor that feature map is downsampled by.
Defaults to 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs: int = 0,
num_fcs: int = 2,
fc_out_channels: int = 1024,
downsample_factor: int = 2,
init_cfg: MultiConfig = dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg) -> None:
super().__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self) -> None:
"""Initialize weights."""
super(FCNMaskHead, self).init_weights()
def forward(self, x: Tensor) -> Tensor:
"""Forward features from the upstream network.
Args:
            x (Tensor): Extracted mask RoI features.
Returns:
Tensor: Predicted foreground masks.
"""
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
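# Shape walkthrough, a sketch with assumed values (roi_feat_size=14,
# conv_out_channels=256, fc_out_channels=1024, downsample_factor=2,
# num_classes=80):
#   downsample_conv: (N, 256, 14, 14) -> (N, 256, 7, 7)
#   flatten:         (N, 256 * 7 * 7) = (N, 12544)
#   fcs:             (N, 12544) -> (N, 1024) -> (N, 1024)
#   fc_logits:       (N, 1024) -> (N, 80 * 49), viewed as (N, 80, 7, 7)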
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmcv.runner import ModuleList, auto_fp16
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsampling it.
Args:
num_convs (int): Number of conv layers in the head. Default: 0.
num_fcs (int): Number of fc layers in the head. Default: 2.
fc_out_channels (int): Number of output channels of fc layer.
Default: 1024.
downsample_factor (int): The factor that feature map is downsampled by.
Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=0,
num_fcs=2,
fc_out_channels=1024,
downsample_factor=2,
init_cfg=dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg):
super(CoarseMaskHead, self).__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self):
super(FCNMaskHead, self).init_weights()
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
default_ef: int = 50
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
_doc_exists = _identity
def test_defaults():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.other == 5
assert index._db_config.work_dir == '.'
assert index._db_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_ef=10))
assert index._runtime_config.default_ef == 10
# change only some settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
def test_set_by_kwargs():
# change all settings
index = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(default_ef=10)
assert index._runtime_config.default_ef == 10
# change only some settings
index = DummyDocIndex[SimpleDoc](work_dir='hi')
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
def test_default_column_config():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
default_ef: int = 50
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.other == 5
assert index._db_config.work_dir == '.'
assert index._db_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_ef=10))
assert index._runtime_config.default_ef == 10
# change only some settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
def test_set_by_kwargs():
# change all settings
index = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(default_ef=10)
assert index._runtime_config.default_ef == 10
# change only some settings
index = DummyDocIndex[SimpleDoc](work_dir='hi')
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
def test_default_column_config():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.5.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.5.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
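# A minimal usage sketch of the public API re-exported above (the dataset id
# "imdb" is illustrative):
# from datasets import load_dataset
# ds = load_dataset("imdb", split="train")
# print(ds[0])        # one example as a dict
# print(ds.features)  # schema backing the Arrow table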
|
"""
Example of using callbacks with Dask
====================================
"""
from typing import Any
import numpy as np
from dask.distributed import Client, LocalCluster
from dask_ml.datasets import make_regression
from dask_ml.model_selection import train_test_split
import xgboost as xgb
import xgboost.dask as dxgb
from xgboost.dask import DaskDMatrix
def probability_for_going_backward(epoch: int) -> float:
return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch))
# All callback functions must inherit from TrainingCallback
class CustomEarlyStopping(xgb.callback.TrainingCallback):
"""A custom early stopping class where early stopping is determined stochastically.
In the beginning, allow the metric to become worse with a probability of 0.999.
    As boosting progresses, the probability is adjusted downward."""
def __init__(
self, *, validation_set: str, target_metric: str, maximize: bool, seed: int
) -> None:
self.validation_set = validation_set
self.target_metric = target_metric
self.maximize = maximize
self.seed = seed
self.rng = np.random.default_rng(seed=seed)
if maximize:
self.better = lambda x, y: x > y
else:
self.better = lambda x, y: x < y
def after_iteration(
self, model: Any, epoch: int, evals_log: xgb.callback.TrainingCallback.EvalsLog
) -> bool:
metric_history = evals_log[self.validation_set][self.target_metric]
if len(metric_history) < 2 or self.better(
metric_history[-1], metric_history[-2]
):
return False # continue training
p = probability_for_going_backward(epoch)
go_backward = self.rng.choice(2, size=(1,), replace=True, p=[1 - p, p]).astype(
np.bool_
)[0]
print(
"The validation metric went into the wrong direction. "
+ f"Stopping training with probability {1 - p}..."
)
if go_backward:
return False # continue training
else:
return True # stop training
def main(client: Client) -> None:
m = 100000
n = 100
X, y = make_regression(n_samples=m, n_features=n, chunks=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
dtrain = DaskDMatrix(client, X_train, y_train)
dtest = DaskDMatrix(client, X_test, y_test)
output = dxgb.train(
client,
{
"verbosity": 1,
"tree_method": "hist",
"objective": "reg:squarederror",
"eval_metric": "rmse",
"max_depth": 6,
"learning_rate": 1.0,
},
dtrain,
num_boost_round=1000,
evals=[(dtrain, "train"), (dtest, "test")],
callbacks=[
CustomEarlyStopping(
validation_set="test", target_metric="rmse", maximize=False, seed=0
)
],
)
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
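# A quick numeric check of probability_for_going_backward (values rounded):
# p(0) ~= 0.999, p(100) ~= 0.81, p(1000) ~= 0.74, so a worsening metric is
# almost always tolerated early on, and the chance of stopping (1 - p) grows
# slowly as boosting progresses.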
|
"""
Example of using callbacks with Dask
====================================
"""
import numpy as np
from dask.distributed import Client, LocalCluster
from dask_ml.datasets import make_regression
from dask_ml.model_selection import train_test_split
import xgboost as xgb
import xgboost.dask as dxgb
from xgboost.dask import DaskDMatrix
def probability_for_going_backward(epoch):
return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch))
# All callback functions must inherit from TrainingCallback
class CustomEarlyStopping(xgb.callback.TrainingCallback):
"""A custom early stopping class where early stopping is determined stochastically.
In the beginning, allow the metric to become worse with a probability of 0.999.
    As boosting progresses, the probability is adjusted downward."""
def __init__(self, *, validation_set, target_metric, maximize, seed):
self.validation_set = validation_set
self.target_metric = target_metric
self.maximize = maximize
self.seed = seed
self.rng = np.random.default_rng(seed=seed)
if maximize:
self.better = lambda x, y: x > y
else:
self.better = lambda x, y: x < y
def after_iteration(self, model, epoch, evals_log):
metric_history = evals_log[self.validation_set][self.target_metric]
if len(metric_history) < 2 or self.better(
metric_history[-1], metric_history[-2]
):
return False # continue training
p = probability_for_going_backward(epoch)
go_backward = self.rng.choice(2, size=(1,), replace=True, p=[1 - p, p]).astype(
            np.bool_
)[0]
print(
"The validation metric went into the wrong direction. "
+ f"Stopping training with probability {1 - p}..."
)
if go_backward:
return False # continue training
else:
return True # stop training
def main(client):
m = 100000
n = 100
X, y = make_regression(n_samples=m, n_features=n, chunks=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
dtrain = DaskDMatrix(client, X_train, y_train)
dtest = DaskDMatrix(client, X_test, y_test)
output = dxgb.train(
client,
{
"verbosity": 1,
"tree_method": "hist",
"objective": "reg:squarederror",
"eval_metric": "rmse",
"max_depth": 6,
"learning_rate": 1.0,
},
dtrain,
num_boost_round=1000,
evals=[(dtrain, "train"), (dtest, "test")],
callbacks=[
CustomEarlyStopping(
validation_set="test", target_metric="rmse", maximize=False, seed=0
)
],
)
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
import base64
from os.path import exists
from typing import Any, Dict, List, Optional
from urllib.parse import urlparse
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
JINA_API_URL: str = "https://api.jina.ai/v1/embeddings"
def is_local(url: str) -> bool:
"""Check if a URL is a local file.
Args:
url (str): The URL to check.
Returns:
bool: True if the URL is a local file, False otherwise.
"""
url_parsed = urlparse(url)
if url_parsed.scheme in ("file", ""): # Possibly a local file
return exists(url_parsed.path)
return False
def get_bytes_str(file_path: str) -> str:
"""Get the bytes string of a file.
Args:
file_path (str): The path to the file.
Returns:
str: The bytes string of the file.
"""
with open(file_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
class JinaEmbeddings(BaseModel, Embeddings):
"""Jina embedding models."""
session: Any #: :meta private:
model_name: str = "jina-embeddings-v2-base-en"
jina_api_key: Optional[SecretStr] = None
model_config = ConfigDict(protected_namespaces=())
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that auth token exists in environment."""
try:
jina_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "jina_api_key", "JINA_API_KEY")
)
except ValueError as original_exc:
try:
jina_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "jina_auth_token", "JINA_AUTH_TOKEN")
)
except ValueError:
raise original_exc
session = requests.Session()
session.headers.update(
{
"Authorization": f"Bearer {jina_api_key.get_secret_value()}",
"Accept-Encoding": "identity",
"Content-type": "application/json",
}
)
values["session"] = session
return values
def _embed(self, input: Any) -> List[List[float]]:
# Call Jina AI Embedding API
resp = self.session.post(
JINA_API_URL, json={"input": input, "model": self.model_name}
).json()
if "data" not in resp:
raise RuntimeError(resp["detail"])
embeddings = resp["data"]
# Sort resulting embeddings by index
sorted_embeddings = sorted(embeddings, key=lambda e: e["index"])
# Return just the embeddings
return [result["embedding"] for result in sorted_embeddings]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Jina's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._embed(texts)
def embed_query(self, text: str) -> List[float]:
"""Call out to Jina's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embed([text])[0]
def embed_images(self, uris: List[str]) -> List[List[float]]:
"""Call out to Jina's image embedding endpoint.
Args:
uris: The list of uris to embed.
Returns:
List of embeddings, one for each text.
"""
input = []
for uri in uris:
if is_local(uri):
input.append({"bytes": get_bytes_str(uri)})
else:
input.append({"url": uri})
return self._embed(input)
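# A minimal usage sketch, assuming a valid key is present in the JINA_API_KEY
# environment variable (the model name falls back to the class default above):
if __name__ == "__main__":
    embedder = JinaEmbeddings()
    vec = embedder.embed_query("hello world")
    print(len(vec))  # dimensionality of the returned embedding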
|
import base64
from os.path import exists
from typing import Any, Dict, List, Optional
from urllib.parse import urlparse
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
JINA_API_URL: str = "https://api.jina.ai/v1/embeddings"
def is_local(url: str) -> bool:
"""Check if a URL is a local file.
Args:
url (str): The URL to check.
Returns:
bool: True if the URL is a local file, False otherwise.
"""
url_parsed = urlparse(url)
if url_parsed.scheme in ("file", ""): # Possibly a local file
return exists(url_parsed.path)
return False
def get_bytes_str(file_path: str) -> str:
"""Get the bytes string of a file.
Args:
file_path (str): The path to the file.
Returns:
str: The bytes string of the file.
"""
with open(file_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
class JinaEmbeddings(BaseModel, Embeddings):
"""Jina embedding models."""
session: Any #: :meta private:
model_name: str = "jina-embeddings-v2-base-en"
jina_api_key: Optional[SecretStr] = None
model_config = ConfigDict(protected_namespaces=())
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that auth token exists in environment."""
try:
jina_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "jina_api_key", "JINA_API_KEY")
)
except ValueError as original_exc:
try:
jina_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "jina_auth_token", "JINA_AUTH_TOKEN")
)
except ValueError:
raise original_exc
session = requests.Session()
session.headers.update(
{
"Authorization": f"Bearer {jina_api_key.get_secret_value()}",
"Accept-Encoding": "identity",
"Content-type": "application/json",
}
)
values["session"] = session
return values
def _embed(self, input: Any) -> List[List[float]]:
# Call Jina AI Embedding API
resp = self.session.post( # type: ignore
JINA_API_URL, json={"input": input, "model": self.model_name}
).json()
if "data" not in resp:
raise RuntimeError(resp["detail"])
embeddings = resp["data"]
# Sort resulting embeddings by index
sorted_embeddings = sorted(embeddings, key=lambda e: e["index"]) # type: ignore
# Return just the embeddings
return [result["embedding"] for result in sorted_embeddings]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Jina's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._embed(texts)
def embed_query(self, text: str) -> List[float]:
"""Call out to Jina's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embed([text])[0]
def embed_images(self, uris: List[str]) -> List[List[float]]:
"""Call out to Jina's image embedding endpoint.
Args:
uris: The list of uris to embed.
Returns:
List of embeddings, one for each text.
"""
input = []
for uri in uris:
if is_local(uri):
input.append({"bytes": get_bytes_str(uri)})
else:
input.append({"url": uri})
return self._embed(input)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import TaskAlignedAssigner
class TestTaskAlignedAssigner(TestCase):
def test_task_aligned_assigner(self):
with self.assertRaises(AssertionError):
TaskAlignedAssigner(topk=0)
assigner = TaskAlignedAssigner(topk=13)
pred_score = torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],
[0.4, 0.5]])
pred_bbox = torch.FloatTensor([
[1, 1, 12, 8],
[4, 4, 20, 20],
[1, 5, 15, 15],
[30, 5, 32, 42],
])
anchor = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([0, 1])
pred_instances = InstanceData()
pred_instances.priors = anchor
pred_instances.bboxes = pred_bbox
pred_instances.scores = pred_score
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
self.assertEqual(len(assign_result.gt_inds), 4)
self.assertEqual(len(assign_result.labels), 4)
# test empty gt
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.empty(0, 2).long()
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.task_modules.assigners import TaskAlignedAssigner
class TestTaskAlignedAssigner(TestCase):
def test_task_aligned_assigner(self):
with self.assertRaises(AssertionError):
TaskAlignedAssigner(topk=0)
assigner = TaskAlignedAssigner(topk=13)
pred_score = torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],
[0.4, 0.5]])
pred_bbox = torch.FloatTensor([
[1, 1, 12, 8],
[4, 4, 20, 20],
[1, 5, 15, 15],
[30, 5, 32, 42],
])
anchor = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([0, 1])
pred_instances = InstanceData()
pred_instances.priors = anchor
pred_instances.bboxes = pred_bbox
pred_instances.scores = pred_score
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
self.assertEqual(len(assign_result.gt_inds), 4)
self.assertEqual(len(assign_result.labels), 4)
# test empty gt
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.empty(0, 2).long()
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
|