# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from .extmath import stable_cumsum
def _weighted_percentile(array, sample_weight, percentile_rank=50):
"""Compute the weighted percentile with method 'inverted_cdf'.
When the percentile lies between two data points of `array`, the function returns
the lower value.
If `array` is a 2D array, the `values` are selected along axis 0.
`NaN` values are ignored by setting their weights to 0. If `array` is 2D, this
is done in a column-isolated manner: a `NaN` in the second column does not impact
the percentile computed for the first column even if `sample_weight` is 1D.
.. versionchanged:: 0.24
Accepts 2D `array`.
.. versionchanged:: 1.7
Supports handling of `NaN` values.
Parameters
----------
array : 1D or 2D array
Values to take the weighted percentile of.
sample_weight: 1D or 2D array
Weights for each value in `array`. Must be same shape as `array` or of shape
`(array.shape[0],)`.
percentile_rank: int or float, default=50
The probability level of the percentile to compute, in percent. Must be between
0 and 100.
Returns
-------
percentile : int if `array` 1D, ndarray if `array` 2D
Weighted percentile at the requested probability level.
"""
n_dim = array.ndim
if n_dim == 0:
return array[()]
if array.ndim == 1:
array = array.reshape((-1, 1))
# When sample_weight 1D, repeat for each array.shape[1]
if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
# Sort `array` and `sample_weight` along axis=0:
sorted_idx = np.argsort(array, axis=0)
sorted_weights = np.take_along_axis(sample_weight, sorted_idx, axis=0)
# Set NaN values in `sample_weight` to 0. We only perform this operation if NaN
# values are present at all to avoid temporary allocations of size `(n_samples,
# n_features)`. If NaN values were present, they would sort to the end (which we can
# observe from `sorted_idx`).
n_features = array.shape[1]
largest_value_per_column = array[sorted_idx[-1, ...], np.arange(n_features)]
if np.isnan(largest_value_per_column).any():
sorted_nan_mask = np.take_along_axis(np.isnan(array), sorted_idx, axis=0)
sorted_weights[sorted_nan_mask] = 0
# Compute the weighted cumulative distribution function (CDF) based on
# sample_weight and scale percentile_rank along it:
weight_cdf = stable_cumsum(sorted_weights, axis=0)
adjusted_percentile_rank = percentile_rank / 100 * weight_cdf[-1]
# For percentile_rank=0, ignore leading observations with sample_weight=0; see
# PR #20528:
mask = adjusted_percentile_rank == 0
adjusted_percentile_rank[mask] = np.nextafter(
adjusted_percentile_rank[mask], adjusted_percentile_rank[mask] + 1
)
# Find index (i) of `adjusted_percentile` in `weight_cdf`,
# such that weight_cdf[i-1] < percentile <= weight_cdf[i]
percentile_idx = np.array(
[
np.searchsorted(weight_cdf[:, i], adjusted_percentile_rank[i])
for i in range(weight_cdf.shape[1])
]
)
# In rare cases, percentile_idx equals sorted_idx.shape[0]:
max_idx = sorted_idx.shape[0] - 1
percentile_idx = np.apply_along_axis(
lambda x: np.clip(x, 0, max_idx), axis=0, arr=percentile_idx
)
col_indices = np.arange(array.shape[1])
percentile_in_sorted = sorted_idx[percentile_idx, col_indices]
result = array[percentile_in_sorted, col_indices]
return result[0] if n_dim == 1 else result
# TODO: refactor to do the symmetrisation inside _weighted_percentile to avoid
# sorting the input array twice.
def _averaged_weighted_percentile(array, sample_weight, percentile_rank=50):
return (
_weighted_percentile(array, sample_weight, percentile_rank)
- _weighted_percentile(-array, sample_weight, 100 - percentile_rank)
) / 2
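# Illustrative usage sketch (not part of the original module; assumes NumPy
# array inputs as described in the docstring above). With uniform weights the
# 'inverted_cdf' median of [1, 2, 3, 4] is the lower middle value; doubling
# the weight of 3 shifts the weighted CDF so the median moves to 3:
#
#   >>> import numpy as np
#   >>> values = np.array([1.0, 2.0, 3.0, 4.0])
#   >>> _weighted_percentile(values, np.ones_like(values), percentile_rank=50)
#   2.0
#   >>> _weighted_percentile(values, np.array([1.0, 1.0, 2.0, 1.0]), percentile_rank=50)
#   3.0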
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from .extmath import stable_cumsum
def _weighted_percentile(array, sample_weight, percentile=50):
"""Compute weighted percentile
Computes lower weighted percentile. If `array` is a 2D array, the
`percentile` is computed along axis 0.
.. versionchanged:: 0.24
Accepts 2D `array`.
Parameters
----------
array : 1D or 2D array
Values to take the weighted percentile of.
sample_weight: 1D or 2D array
Weights for each value in `array`. Must be same shape as `array` or
of shape `(array.shape[0],)`.
percentile: int or float, default=50
Percentile to compute. Must be value between 0 and 100.
Returns
-------
percentile : int if `array` 1D, ndarray if `array` 2D
Weighted percentile.
"""
n_dim = array.ndim
if n_dim == 0:
return array[()]
if array.ndim == 1:
array = array.reshape((-1, 1))
# When sample_weight 1D, repeat for each array.shape[1]
if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
sorted_idx = np.argsort(array, axis=0)
sorted_weights = np.take_along_axis(sample_weight, sorted_idx, axis=0)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(sorted_weights, axis=0)
adjusted_percentile = percentile / 100 * weight_cdf[-1]
# For percentile=0, ignore leading observations with sample_weight=0. GH20528
mask = adjusted_percentile == 0
adjusted_percentile[mask] = np.nextafter(
adjusted_percentile[mask], adjusted_percentile[mask] + 1
)
percentile_idx = np.array(
[
np.searchsorted(weight_cdf[:, i], adjusted_percentile[i])
for i in range(weight_cdf.shape[1])
]
)
percentile_idx = np.array(percentile_idx)
# In rare cases, percentile_idx equals sorted_idx.shape[0]
max_idx = sorted_idx.shape[0] - 1
percentile_idx = np.apply_along_axis(
lambda x: np.clip(x, 0, max_idx), axis=0, arr=percentile_idx
)
col_index = np.arange(array.shape[1])
percentile_in_sorted = sorted_idx[percentile_idx, col_index]
percentile = array[percentile_in_sorted, col_index]
return percentile[0] if n_dim == 1 else percentile
# TODO: refactor to do the symmetrisation inside _weighted_percentile to avoid
# sorting the input array twice.
def _averaged_weighted_percentile(array, sample_weight, percentile=50):
return (
_weighted_percentile(array, sample_weight, percentile)
- _weighted_percentile(-array, sample_weight, 100 - percentile)
) / 2
|
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture(scope="session")
def _stsb_bert_tiny_model() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def stsb_bert_tiny_model(_stsb_bert_tiny_model: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_stsb_bert_tiny_model)
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding_model(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
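# Usage sketch (illustrative, not part of the original conftest): the
# session-scoped `_stsb_bert_tiny_model` is loaded once and each test receives
# an isolated deep copy via `stsb_bert_tiny_model`, so per-test mutations do
# not leak across tests:
#
#   def test_encode_shape(stsb_bert_tiny_model: SentenceTransformer) -> None:
#       embeddings = stsb_bert_tiny_model.encode(["hello world"])
#       assert embeddings.shape[0] == 1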
|
from __future__ import annotations
import os
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
:class:`LaserEncoder` is an encoder based on Facebook Research's LASER
(Language-Agnostic SEntence Representations) to compute multilingual
sentence embeddings: https://github.com/facebookresearch/LASER
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
language: str = 'en',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE``.
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
:param language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param traversal_paths: traversal path of the Documents (e.g. 'r', 'c')
:param batch_size: size of each batch
:param device: Device string ('cpu'/'cuda'/'cuda:2')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.device = device
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.language = language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': self.device == 'cpu'},
)
self.device = torch.device(device)
self.model.bpeSentenceEmbedding.encoder.encoder.to(self.device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_paths``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs is None:
return
document_batches_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
require_attr='text',
)
for document_batch in document_batches_generator:
text_batch = document_batch.texts
language = parameters.get('language', self.language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
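# Usage sketch (illustrative; assumes the standard Jina Flow API and is not
# part of the original executor). Documents posted to the Flow come back with
# their `embedding` attribute populated by LASER:
#
#   from jina import Document, DocumentArray, Flow
#
#   flow = Flow().add(uses=LaserEncoder)
#   with flow:
#       flow.post(
#           on='/',
#           inputs=DocumentArray([Document(text='hello'), Document(text='hola')]),
#           on_done=lambda resp: print(len(resp.docs)),
#       )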
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
:class:`LaserEncoder` is an encoder based on Facebook Research's LASER
(Language-Agnostic SEntence Representations) to compute multilingual
sentence embeddings: https://github.com/facebookresearch/LASER
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
language: str = 'en',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE``.
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
:param language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param traversal_paths: traversal path of the Documents (e.g. 'r', 'c')
:param batch_size: size of each batch
:param device: Device string ('cpu'/'cuda'/'cuda:2')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.device = device
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.language = language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': self.device == 'cpu'},
)
self.device = torch.device(device)
self.model.bpeSentenceEmbedding.encoder.encoder.to(self.device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_paths``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
language = parameters.get('language', self.language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import PLUGIN_LAYERS
eps = 1e-6
@PLUGIN_LAYERS.register_module()
class DropBlock(nn.Module):
"""Randomly drop some regions of feature maps.
Please refer to the method proposed in `DropBlock
<https://arxiv.org/abs/1810.12890>`_ for details.
Args:
drop_prob (float): The probability of dropping each block.
block_size (int): The size of dropped blocks.
warmup_iters (int): The drop probability will linearly increase
from `0` to `drop_prob` during the first `warmup_iters` iterations.
Default: 2000.
"""
def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
super(DropBlock, self).__init__()
assert block_size % 2 == 1
assert 0 < drop_prob <= 1
assert warmup_iters >= 0
self.drop_prob = drop_prob
self.block_size = block_size
self.warmup_iters = warmup_iters
self.iter_cnt = 0
def forward(self, x):
"""
Args:
x (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
if not self.training:
return x
self.iter_cnt += 1
N, C, H, W = list(x.shape)
gamma = self._compute_gamma((H, W))
mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)
mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
mask = F.pad(mask, [self.block_size // 2] * 4, value=0)
mask = F.max_pool2d(
input=mask,
stride=(1, 1),
kernel_size=(self.block_size, self.block_size),
padding=self.block_size // 2)
mask = 1 - mask
x = x * mask * mask.numel() / (eps + mask.sum())
return x
def _compute_gamma(self, feat_size):
"""Compute the value of gamma according to paper. gamma is the
parameter of bernoulli distribution, which controls the number of
features to drop.
gamma = (drop_prob * fm_area) / (drop_area * keep_area)
Args:
feat_size (tuple[int, int]): The height and width of feature map.
Returns:
float: The value of gamma.
"""
gamma = (self.drop_prob * feat_size[0] * feat_size[1])
gamma /= ((feat_size[0] - self.block_size + 1) *
(feat_size[1] - self.block_size + 1))
gamma /= (self.block_size**2)
factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /
self.warmup_iters)
return gamma * factor
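# Usage sketch (illustrative, not part of the original module): apply the
# plugin to a training-mode feature map; with warmup_iters=2000 the effective
# gamma from `_compute_gamma` ramps up linearly over the first 2000 calls.
#
#   >>> layer = DropBlock(drop_prob=0.1, block_size=3, warmup_iters=2000)
#   >>> _ = layer.train()
#   >>> out = layer(torch.randn(2, 16, 32, 32))
#   >>> tuple(out.shape)
#   (2, 16, 32, 32)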
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import PLUGIN_LAYERS
eps = 1e-6
@PLUGIN_LAYERS.register_module()
class DropBlock(nn.Module):
"""Randomly drop some regions of feature maps.
Please refer to the method proposed in `DropBlock
<https://arxiv.org/abs/1810.12890>`_ for details.
Args:
drop_prob (float): The probability of dropping each block.
block_size (int): The size of dropped blocks.
warmup_iters (int): The drop probability will linearly increase
from `0` to `drop_prob` during the first `warmup_iters` iterations.
Default: 2000.
"""
def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
super(DropBlock, self).__init__()
assert block_size % 2 == 1
assert 0 < drop_prob <= 1
assert warmup_iters >= 0
self.drop_prob = drop_prob
self.block_size = block_size
self.warmup_iters = warmup_iters
self.iter_cnt = 0
def forward(self, x):
"""
Args:
x (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
if not self.training:
return x
self.iter_cnt += 1
N, C, H, W = list(x.shape)
gamma = self._compute_gamma((H, W))
mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)
mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
mask = F.pad(mask, [self.block_size // 2] * 4, value=0)
mask = F.max_pool2d(
input=mask,
stride=(1, 1),
kernel_size=(self.block_size, self.block_size),
padding=self.block_size // 2)
mask = 1 - mask
x = x * mask * mask.numel() / (eps + mask.sum())
return x
def _compute_gamma(self, feat_size):
"""Compute the value of gamma according to paper. gamma is the
parameter of bernoulli distribution, which controls the number of
features to drop.
gamma = (drop_prob * fm_area) / (drop_area * keep_area)
Args:
feat_size (tuple[int, int]): The height and width of feature map.
Returns:
float: The value of gamma.
"""
gamma = (self.drop_prob * feat_size[0] * feat_size[1])
gamma /= ((feat_size[0] - self.block_size + 1) *
(feat_size[1] - self.block_size + 1))
gamma /= (self.block_size**2)
factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /
self.warmup_iters)
return gamma * factor
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.1.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information containing major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
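# Worked example (illustrative, not part of the original module): release
# candidate suffixes are split into a numeric part and an 'rc' tag, e.g.
#
#   >>> parse_version_info('0.1.0')
#   (0, 1, 0)
#   >>> parse_version_info('0.2.0rc1')
#   (0, 2, 0, 'rc1')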
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.0.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.0.1'.
Returns:
tuple: version information containing major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
metrics = TensorFlowCompBackend.Metrics
else:
metrics = None
@pytest.mark.tensorflow
def test_cosine_sim_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.cosine_sim(a, b).tensor.shape == (1,)
assert metrics.cosine_sim(a, b).tensor == metrics.cosine_sim(b, a).tensor
tf.experimental.numpy.allclose(metrics.cosine_sim(a, a).tensor, tf.ones(1))
a = TensorFlowTensor(tf.random.normal((10, 3)))
b = TensorFlowTensor(tf.random.normal((5, 3)))
assert metrics.cosine_sim(a, b).tensor.shape == (10, 5)
assert metrics.cosine_sim(b, a).tensor.shape == (5, 10)
diag_dists = tf.linalg.diag(metrics.cosine_sim(b, b).tensor) # self-comparisons
tf.experimental.numpy.allclose(diag_dists, tf.ones(5))
@pytest.mark.tensorflow
def test_euclidean_dist_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.euclidean_dist(a, b).tensor.shape == (1,)
assert metrics.euclidean_dist(a, b).tensor == metrics.euclidean_dist(b, a).tensor
tf.experimental.numpy.allclose(metrics.euclidean_dist(a, a).tensor, tf.zeros(1))
a = TensorFlowTensor(tf.zeros((1, 1)))
b = TensorFlowTensor(tf.ones((4, 1)))
assert metrics.euclidean_dist(a, b).tensor.shape == (4,)
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, metrics.euclidean_dist(b, a).tensor
)
tf.experimental.numpy.allclose(metrics.euclidean_dist(a, a).tensor, tf.zeros(1))
a = TensorFlowTensor(tf.constant([0.0, 2.0, 0.0]))
b = TensorFlowTensor(tf.constant([0.0, 0.0, 2.0]))
desired_output_singleton: tf.Tensor = tf.math.sqrt(
tf.constant([2.0**2.0 + 2.0**2.0])
)
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, desired_output_singleton
)
a = TensorFlowTensor(tf.constant([[0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]))
b = TensorFlowTensor(tf.constant([[0.0, 0.0, 2.0], [0.0, 2.0, 0.0]]))
desired_output_singleton = tf.constant([[2.828427, 0.0], [0.0, 2.828427]])
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, desired_output_singleton
)
@pytest.mark.tensorflow
def test_sqeuclidean_dist_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.sqeuclidean_dist(a, b).tensor.shape == (1,)
tf.experimental.numpy.allclose(
metrics.sqeuclidean_dist(a, b).tensor,
metrics.euclidean_dist(a, b).tensor ** 2,
)
a = TensorFlowTensor(tf.random.normal((1, 1)))
b = TensorFlowTensor(tf.random.normal((4, 1)))
assert metrics.sqeuclidean_dist(b, a).tensor.shape == (4,)
tf.experimental.numpy.allclose(
metrics.sqeuclidean_dist(a, b).tensor,
metrics.euclidean_dist(a, b).tensor ** 2,
)
|
import pytest
try:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
metrics = TensorFlowCompBackend.Metrics
except (ImportError, TypeError):
metrics = None
@pytest.mark.tensorflow
def test_cosine_sim_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.cosine_sim(a, b).tensor.shape == (1,)
assert metrics.cosine_sim(a, b).tensor == metrics.cosine_sim(b, a).tensor
tf.experimental.numpy.allclose(metrics.cosine_sim(a, a).tensor, tf.ones(1))
a = TensorFlowTensor(tf.random.normal((10, 3)))
b = TensorFlowTensor(tf.random.normal((5, 3)))
assert metrics.cosine_sim(a, b).tensor.shape == (10, 5)
assert metrics.cosine_sim(b, a).tensor.shape == (5, 10)
diag_dists = tf.linalg.diag(metrics.cosine_sim(b, b).tensor) # self-comparisons
tf.experimental.numpy.allclose(diag_dists, tf.ones(5))
@pytest.mark.tensorflow
def test_euclidean_dist_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.euclidean_dist(a, b).tensor.shape == (1,)
assert metrics.euclidean_dist(a, b).tensor == metrics.euclidean_dist(b, a).tensor
tf.experimental.numpy.allclose(metrics.euclidean_dist(a, a).tensor, tf.zeros(1))
a = TensorFlowTensor(tf.zeros((1, 1)))
b = TensorFlowTensor(tf.ones((4, 1)))
assert metrics.euclidean_dist(a, b).tensor.shape == (4,)
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, metrics.euclidean_dist(b, a).tensor
)
tf.experimental.numpy.allclose(metrics.euclidean_dist(a, a).tensor, tf.zeros(1))
a = TensorFlowTensor(tf.constant([0.0, 2.0, 0.0]))
b = TensorFlowTensor(tf.constant([0.0, 0.0, 2.0]))
desired_output_singleton: tf.Tensor = tf.math.sqrt(
tf.constant([2.0**2.0 + 2.0**2.0])
)
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, desired_output_singleton
)
a = TensorFlowTensor(tf.constant([[0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]))
b = TensorFlowTensor(tf.constant([[0.0, 0.0, 2.0], [0.0, 2.0, 0.0]]))
desired_output_singleton = tf.constant([[2.828427, 0.0], [0.0, 2.828427]])
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, desired_output_singleton
)
@pytest.mark.tensorflow
def test_sqeuclidean_dist_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.sqeuclidean_dist(a, b).tensor.shape == (1,)
tf.experimental.numpy.allclose(
metrics.sqeuclidean_dist(a, b).tensor,
metrics.euclidean_dist(a, b).tensor ** 2,
)
a = TensorFlowTensor(tf.random.normal((1, 1)))
b = TensorFlowTensor(tf.random.normal((4, 1)))
assert metrics.sqeuclidean_dist(b, a).tensor.shape == (4,)
tf.experimental.numpy.allclose(
metrics.sqeuclidean_dist(a, b).tensor,
metrics.euclidean_dist(a, b).tensor ** 2,
)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/dev-2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
_CTC_DECODERS = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
_CUDA_CTC_DECODERS = [
"CUCTCDecoder",
"CUCTCHypothesis",
"cuda_ctc_decoder",
]
def __getattr__(name: str):
if name in _CTC_DECODERS:
try:
from . import _ctc_decoder
except Exception as err:
raise RuntimeError(
"CTC Decoder suit requires flashlight-text package and optionally KenLM. Please install them."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
elif name in _CUDA_CTC_DECODERS:
try:
from . import _cuda_ctc_decoder
except AttributeError as err:
raise RuntimeError(
"To use CUCTC decoder, please set BUILD_CUDA_CTC_DECODER=1 when building from source."
) from err
item = getattr(_cuda_ctc_decoder, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__)
__all__ = _CTC_DECODERS + _CUDA_CTC_DECODERS
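# Usage sketch (illustrative; assumes this module is torchaudio's decoder
# package __init__): attributes are resolved lazily through the PEP 562
# module-level __getattr__ above, so the heavy flashlight-text / CUDA bindings
# are only imported on first access, e.g.
#
#   >>> from torchaudio.models.decoder import ctc_decoder  # triggers the lazy import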
|
_CTC_DECODERS = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
def __getattr__(name: str):
if name in _CTC_DECODERS:
try:
from . import _ctc_decoder
except Exception as err:
raise RuntimeError(
"CTC Decoder suit requires flashlight-text package and optionally KenLM. Please install them."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__)
__all__ = _CTC_DECODERS
|
import matplotlib.pyplot as plt
import torch
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F
def plot(imgs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
imgs = [imgs]
num_rows = len(imgs)
num_cols = len(imgs[0])
_, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
for row_idx, row in enumerate(imgs):
for col_idx, img in enumerate(row):
boxes = None
masks = None
if isinstance(img, tuple):
img, target = img
if isinstance(target, dict):
boxes = target.get("boxes")
masks = target.get("masks")
elif isinstance(target, datapoints.BoundingBoxes):
boxes = target
else:
raise ValueError(f"Unexpected target type: {type(target)}")
img = F.to_image(img)
if img.dtype.is_floating_point and img.min() < 0:
# Poor man's re-normalization for the colors to be OK-ish. This
# is useful for images coming out of Normalize()
img -= img.min()
img /= img.max()
img = F.to_dtype(img, torch.uint8, scale=True)
if boxes is not None:
img = draw_bounding_boxes(img, boxes, colors="yellow", width=3)
if masks is not None:
img = draw_segmentation_masks(img, masks.to(torch.bool), colors=["green"] * masks.shape[0], alpha=.65)
ax = axs[row_idx, col_idx]
ax.imshow(img.permute(1, 2, 0).numpy())
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
plt.tight_layout()
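# Usage sketch (illustrative; `img` and `target` are hypothetical sample data,
# not defined in this helper): pass a 2D grid of images, where an entry may be
# a plain image or an (image, target) tuple whose dict carries "boxes"/"masks":
#
#   plot([[img, (img, target)]])   # one row, two columns
#   plt.show()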
|
import matplotlib.pyplot as plt
from torchvision.utils import draw_bounding_boxes
def plot(imgs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
imgs = [imgs]
num_rows = len(imgs)
num_cols = len(imgs[0])
_, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
for row_idx, row in enumerate(imgs):
for col_idx, img in enumerate(row):
bboxes = None
if isinstance(img, tuple):
bboxes = img[1]
img = img[0]
if isinstance(bboxes, dict):
bboxes = bboxes['bboxes']
if img.dtype.is_floating_point and img.min() < 0:
# Poor man's re-normalization for the colors to be OK-ish. This
# is useful for images coming out of Normalize()
img -= img.min()
img /= img.max()
if bboxes is not None:
img = draw_bounding_boxes(img, bboxes, colors="yellow", width=3)
ax = axs[row_idx, col_idx]
ax.imshow(img.permute(1, 2, 0).numpy())
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
plt.tight_layout()
|
from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
# Setting the torch_params attribute will have the module automatically
# track parameters.
self.torch_params = torch.nn.ParameterDict(
{variable.path: variable.value for variable in self.variables}
)
def named_parameters(
self,
prefix: str = "",
recurse: bool = True,
remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
if not hasattr(self, "torch_params"):
self._track_variables()
return torch.nn.Module.named_parameters(
self, prefix, recurse, remove_duplicate
)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.src.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "torch_params"
):
from keras.src.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
def _post_track_variable(self, variable):
if hasattr(self, "torch_params"):
if variable.path not in self.torch_params:
self.torch_params[variable.path] = variable.value
def _post_untrack_variable(self, variable):
if hasattr(self, "torch_params"):
self.torch_params.pop(variable.path)
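# Usage sketch (illustrative, not part of the original module): because Keras
# variables are mirrored into `torch_params`, a plain torch optimizer can be
# driven directly from a Keras layer's parameters, e.g.
#
#   optimizer = torch.optim.SGD(layer.parameters(), lr=1e-2)  # `layer` is a Keras layer (hypothetical)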
|
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
# Index given to ParameterDict must be a string
self.torch_params = torch.nn.ParameterDict(
{str(id(variable)): variable.value for variable in self.variables}
)
def parameters(self, recurse=True):
if not hasattr(self, "torch_params"):
self._track_variables()
return torch.nn.Module.parameters(self, recurse=recurse)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.src.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "torch_params"
):
from keras.src.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
def _post_track_variable(self, variable):
if hasattr(self, "torch_params"):
# Index given to ParameterDict must be a string
key = str(id(variable))
if key not in self.torch_params:
self.torch_params[key] = variable.value
def _post_untrack_variable(self, variable):
if hasattr(self, "torch_params"):
# Index given to ParameterDict must be a string
key = str(id(variable))
self.torch_params.pop(key)
|
"""Helper functions for clients in Jina."""
from functools import wraps
from typing import Callable, Optional
from jina.excepts import BadClientCallback, BadServer
from jina.helper import get_rich_console
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2
from jina.types.request.data import Response
def pprint_routes(resp: 'Response', stack_limit: int = 3):
"""Pretty print routes with :mod:`prettytable`, fallback to :func:`print`.
:param resp: the :class:`Response` object
:param stack_limit: traceback limit
"""
routes = resp.routes
from rich import box
from rich.table import Table
table = Table(box=box.SIMPLE)
for v in ('Executor', 'Time', 'Exception'):
table.add_column(v)
for route in routes:
status_icon = '🟢'
if route.status.code == jina_pb2.StatusProto.ERROR:
status_icon = '🔴'
table.add_row(
f'{status_icon} {route.executor}',
f'{route.start_time.ToMilliseconds() - routes[0].start_time.ToMilliseconds()}ms',
''.join(route.status.exception.stacks[-stack_limit:]),
)
console = get_rich_console()
console.print(table)
def _safe_callback(func: Callable, continue_on_error: bool, logger) -> Callable:
@wraps(func)
def _arg_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as ex:
err_msg = f'uncaught exception in callback {func.__name__}(): {ex!r}'
if continue_on_error:
logger.error(err_msg)
else:
raise BadClientCallback(err_msg) from ex
return _arg_wrapper
def callback_exec(
response,
logger: JinaLogger,
on_done: Optional[Callable] = None,
on_error: Optional[Callable] = None,
on_always: Optional[Callable] = None,
continue_on_error: bool = False,
) -> None:
"""Execute the callback with the response.
:param response: the response
:param on_done: the on_done callback
:param on_error: the on_error callback
:param on_always: the on_always callback
:param continue_on_error: whether to continue on error
:param logger: a logger instance
"""
if response.header.status.code >= jina_pb2.StatusProto.ERROR:
if on_error:
_safe_callback(on_error, continue_on_error, logger)(response)
elif continue_on_error:
logger.error(f'Server error: {response.header}')
else:
raise BadServer(response.header)
elif on_done and response.header.status.code == jina_pb2.StatusProto.SUCCESS:
_safe_callback(on_done, continue_on_error, logger)(response)
if on_always:
_safe_callback(on_always, continue_on_error, logger)(response)
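# Usage sketch (illustrative; the lambdas are hypothetical callbacks): wire the
# three callbacks around a response. `on_error` fires for error status codes,
# `on_done` for success, and `on_always` in both cases.
#
#   callback_exec(
#       response,
#       logger=JinaLogger('client'),
#       on_done=lambda r: print('done'),
#       on_error=lambda r: print('error'),
#       on_always=lambda r: print('always'),
#   )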
|
"""Helper functions for clients in Jina."""
from functools import wraps
from typing import Callable
from jina.excepts import BadClientCallback, BadServer
from jina.helper import get_rich_console
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2
from jina.types.request.data import Response
def pprint_routes(resp: 'Response', stack_limit: int = 3):
"""Pretty print routes with :mod:`prettytable`, fallback to :func:`print`.
:param resp: the :class:`Response` object
:param stack_limit: traceback limit
"""
routes = resp.routes
from rich import box
from rich.table import Table
table = Table(box=box.SIMPLE)
for v in ('Executor', 'Time', 'Exception'):
table.add_column(v)
for route in routes:
status_icon = '🟢'
if route.status.code == jina_pb2.StatusProto.ERROR:
status_icon = '🔴'
table.add_row(
f'{status_icon} {route.executor}',
f'{route.start_time.ToMilliseconds() - routes[0].start_time.ToMilliseconds()}ms',
''.join(route.status.exception.stacks[-stack_limit:]),
)
console = get_rich_console()
console.print(table)
def _safe_callback(func: Callable, continue_on_error: bool, logger) -> Callable:
@wraps(func)
def _arg_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as ex:
err_msg = f'uncaught exception in callback {func.__name__}(): {ex!r}'
if continue_on_error:
logger.error(err_msg)
else:
raise BadClientCallback(err_msg) from ex
return _arg_wrapper
def callback_exec(
response,
on_done: Callable,
on_error: Callable,
on_always: Callable,
continue_on_error: bool,
logger: JinaLogger,
) -> None:
"""Execute the callback with the response.
:param response: the response
:param on_done: the on_done callback
:param on_error: the on_error callback
:param on_always: the on_always callback
:param continue_on_error: whether to continue on error
:param logger: a logger instance
"""
if response.header.status.code >= jina_pb2.StatusProto.ERROR:
if on_error:
_safe_callback(on_error, continue_on_error, logger)(response)
elif continue_on_error:
logger.error(f'Server error: {response.header}')
else:
raise BadServer(response.header)
elif on_done and response.header.status.code == jina_pb2.StatusProto.SUCCESS:
_safe_callback(on_done, continue_on_error, logger)(response)
if on_always:
_safe_callback(on_always, continue_on_error, logger)(response)
|
"""Defines utilities for switching audio backends"""
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if _mod_utils.is_sox_available():
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
|
"""Defines utilities for switching audio backends"""
import warnings
from typing import Optional, List
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import (
no_backend,
sox_io_backend,
soundfile_backend,
)
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if _mod_utils.is_sox_available():
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
|
from llama_index.core.instrumentation.events.base import BaseEvent
class SpanDropEvent(BaseEvent):
"""
SpanDropEvent.
Args:
err_str (str): Error string.
"""
err_str: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SpanDropEvent"
|
from llama_index.core.instrumentation.events.base import BaseEvent
class SpanDropEvent(BaseEvent):
"""SpanDropEvent.
Args:
err_str (str): Error string.
"""
err_str: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SpanDropEvent"
|
import functools
import numbers
from collections import defaultdict
from typing import Any, Dict, Sequence, Type, TypeVar, Union
from torchvision.prototype import datapoints
from torchvision.prototype.datapoints._datapoint import FillType, FillTypeJIT
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
from typing_extensions import Literal
def _setup_float_or_seq(arg: Union[float, Sequence[float]], name: str, req_size: int = 2) -> Sequence[float]:
if not isinstance(arg, (float, Sequence)):
raise TypeError(f"{name} should be float or a sequence of floats. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) != req_size:
raise ValueError(f"If {name} is a sequence its length should be one of {req_size}. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, float):
raise ValueError(f"{name} should be a sequence of floats. Got {type(element)}")
if isinstance(arg, float):
arg = [float(arg), float(arg)]
if isinstance(arg, (list, tuple)) and len(arg) == 1:
arg = [arg[0], arg[0]]
return arg
def _check_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> None:
if isinstance(fill, dict):
for key, value in fill.items():
# Check key for type
_check_fill_arg(value)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
_check_fill_arg(default_value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
def _convert_fill_arg(fill: datapoints.FillType) -> datapoints.FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
# This cast does Sequence -> List[float] to please mypy and torch.jit.script
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill
def _setup_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> Dict[Type, FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
sanitized_default = _convert_fill_arg(default_value)
fill.default_factory = functools.partial(_default_arg, sanitized_default)
return fill # type: ignore[return-value]
return _get_defaultdict(_convert_fill_arg(fill))
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if isinstance(padding, (tuple, list)) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
|
import functools
import numbers
from collections import defaultdict
from typing import Any, Dict, Sequence, Type, TypeVar, Union
from torchvision.prototype import features
from torchvision.prototype.features._feature import FillType, FillTypeJIT
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
from typing_extensions import Literal
def _setup_float_or_seq(arg: Union[float, Sequence[float]], name: str, req_size: int = 2) -> Sequence[float]:
if not isinstance(arg, (float, Sequence)):
raise TypeError(f"{name} should be float or a sequence of floats. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) != req_size:
raise ValueError(f"If {name} is a sequence its length should be one of {req_size}. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, float):
raise ValueError(f"{name} should be a sequence of floats. Got {type(element)}")
if isinstance(arg, float):
arg = [float(arg), float(arg)]
if isinstance(arg, (list, tuple)) and len(arg) == 1:
arg = [arg[0], arg[0]]
return arg
def _check_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> None:
if isinstance(fill, dict):
for key, value in fill.items():
            # Recursively validate each fill value in the mapping
_check_fill_arg(value)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
_check_fill_arg(default_value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
    # This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
def _convert_fill_arg(fill: features.FillType) -> features.FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
# This cast does Sequence -> List[float] to please mypy and torch.jit.script
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill
def _setup_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> Dict[Type, FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
sanitized_default = _convert_fill_arg(default_value)
fill.default_factory = functools.partial(_default_arg, sanitized_default)
return fill # type: ignore[return-value]
return _get_defaultdict(_convert_fill_arg(fill))
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if isinstance(padding, (tuple, list)) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax'))
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax'))
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
"""Wrapper of mse loss."""
return F.mse_loss(pred, target, reduction='none')
@MODELS.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
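if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); it assumes torch and
    # mmdet are installed so the imports at the top of this file resolve.
    import torch
    loss_fn = MSELoss(reduction='mean', loss_weight=0.5)
    pred = torch.rand(4, 10)
    target = torch.rand(4, 10)
    # Per-element weights and an explicit averaging factor are both optional.
    print(loss_fn(pred, target, weight=torch.ones(4, 10), avg_factor=40))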
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
"""Warpper of mse loss."""
return F.mse_loss(pred, target, reduction='none')
@MODELS.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
|
import os
import subprocess
from pathlib import Path
from typing import Dict
import numpy as np
import pytest
from jina import Document, DocumentArray
from PIL import Image
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
def get_path(file_name_no_suffix: str) -> str:
return os.path.join(test_dir, "test_data", file_name_no_suffix + ".png")
image_dict = {
file_name: np.array(Image.open(get_path(file_name)))[:, :, 0:3]
for file_name in ["airplane", "banana1", "banana2", "studio"]
}
return image_dict
@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
return DocumentArray(
[Document(blob=np.ones((10, 10, 3), dtype=np.uint8)) for _ in range(11)]
)
@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(chunks=[Document(blob=np.ones((10, 10, 3), dtype=np.uint8))])
for _ in range(11)
]
)
@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(
chunks=[
Document(blob=np.ones((10, 10, 3), dtype=np.uint8))
for _ in range(11)
]
)
]
)
]
)
@pytest.fixture(scope="session")
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope="session")
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(["docker", "build", "-t", docker_image_name, "."], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
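# Hedged illustration (not part of the original module) of a test that could consume
# the fixtures above; it only checks properties that follow from how the fixtures
# are constructed.
def test_docs_with_blobs_shape(docs_with_blobs: DocumentArray):
    assert len(docs_with_blobs) == 11
    for doc in docs_with_blobs:
        assert doc.blob.shape == (10, 10, 3)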
|
import os
import subprocess
from pathlib import Path
from typing import Dict
import numpy as np
import pytest
from jina import Document, DocumentArray
from PIL import Image
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
def get_path(file_name_no_suffix: str) -> str:
return os.path.join(test_dir, "test_data", file_name_no_suffix + ".png")
image_dict = {
file_name: np.array(Image.open(get_path(file_name)))[:, :, 0:3]
for file_name in ["airplane", "banana1", "banana2", "studio"]
}
return image_dict
@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
return DocumentArray(
[Document(blob=np.ones((10, 10, 3), dtype=np.uint8)) for _ in range(11)]
)
@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(chunks=[Document(blob=np.ones((10, 10, 3), dtype=np.uint8))])
for _ in range(11)
]
)
@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(
chunks=[
Document(blob=np.ones((10, 10, 3), dtype=np.uint8))
for _ in range(11)
]
)
]
)
]
)
@pytest.fixture(scope="session")
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope="session")
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(["docker", "build", "-t", docker_image_name, "."], check=True)
return docker_image_name
|
"""Node PostProcessor module."""
from llama_index.core.postprocessor.llm_rerank import LLMRerank
from llama_index.core.postprocessor.structured_llm_rerank import (
StructuredLLMRerank,
DocumentWithRelevance,
)
from llama_index.core.postprocessor.metadata_replacement import (
MetadataReplacementPostProcessor,
)
from llama_index.core.postprocessor.node import (
AutoPrevNextNodePostprocessor,
KeywordNodePostprocessor,
LongContextReorder,
PrevNextNodePostprocessor,
SimilarityPostprocessor,
)
from llama_index.core.postprocessor.node_recency import (
EmbeddingRecencyPostprocessor,
FixedRecencyPostprocessor,
TimeWeightedPostprocessor,
)
from llama_index.core.postprocessor.optimizer import SentenceEmbeddingOptimizer
from llama_index.core.postprocessor.pii import (
NERPIINodePostprocessor,
PIINodePostprocessor,
)
from llama_index.core.postprocessor.sbert_rerank import SentenceTransformerRerank
__all__ = [
"SimilarityPostprocessor",
"KeywordNodePostprocessor",
"PrevNextNodePostprocessor",
"AutoPrevNextNodePostprocessor",
"FixedRecencyPostprocessor",
"EmbeddingRecencyPostprocessor",
"TimeWeightedPostprocessor",
"PIINodePostprocessor",
"NERPIINodePostprocessor",
"LLMRerank",
"StructuredLLMRerank",
"DocumentWithRelevance",
"SentenceEmbeddingOptimizer",
"SentenceTransformerRerank",
"MetadataReplacementPostProcessor",
"LongContextReorder",
]
|
"""Node PostProcessor module."""
from llama_index.core.postprocessor.llm_rerank import LLMRerank
from llama_index.core.postprocessor.metadata_replacement import (
MetadataReplacementPostProcessor,
)
from llama_index.core.postprocessor.node import (
AutoPrevNextNodePostprocessor,
KeywordNodePostprocessor,
LongContextReorder,
PrevNextNodePostprocessor,
SimilarityPostprocessor,
)
from llama_index.core.postprocessor.node_recency import (
EmbeddingRecencyPostprocessor,
FixedRecencyPostprocessor,
TimeWeightedPostprocessor,
)
from llama_index.core.postprocessor.optimizer import SentenceEmbeddingOptimizer
from llama_index.core.postprocessor.pii import (
NERPIINodePostprocessor,
PIINodePostprocessor,
)
from llama_index.core.postprocessor.sbert_rerank import SentenceTransformerRerank
__all__ = [
"SimilarityPostprocessor",
"KeywordNodePostprocessor",
"PrevNextNodePostprocessor",
"AutoPrevNextNodePostprocessor",
"FixedRecencyPostprocessor",
"EmbeddingRecencyPostprocessor",
"TimeWeightedPostprocessor",
"PIINodePostprocessor",
"NERPIINodePostprocessor",
"LLMRerank",
"SentenceEmbeddingOptimizer",
"SentenceTransformerRerank",
"MetadataReplacementPostProcessor",
"LongContextReorder",
]
|
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import is_pure_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
    # This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_pure_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_pure_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
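if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); the prototype
    # transforms API is assumed to dispatch single datapoints as shown.
    image = datapoints.Image(torch.rand(3, 16, 16))
    to_hwc = PermuteDimensions(dims=(1, 2, 0))    # CHW -> HWC for images/videos
    print(to_hwc(image).shape)                    # expected: torch.Size([16, 16, 3])
    swap_hw = TransposeDimensions(dims=(-2, -1))  # swap the last two dimensions
    print(swap_hw(image).shape)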
|
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2.utils import is_pure_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
    # This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_pure_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_pure_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .cascade_rcnn import CascadeRCNN
@MODELS.register_module()
class HybridTaskCascade(CascadeRCNN):
"""Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
def __init__(self, **kwargs):
super(HybridTaskCascade, self).__init__(**kwargs)
@property
def with_semantic(self):
"""bool: whether the detector has a semantic head"""
return self.roi_head.with_semantic
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN
@DETECTORS.register_module()
class HybridTaskCascade(CascadeRCNN):
"""Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
def __init__(self, **kwargs):
super(HybridTaskCascade, self).__init__(**kwargs)
@property
def with_semantic(self):
"""bool: whether the detector has a semantic head"""
return self.roi_head.with_semantic
|
"""langchain-core version information and utilities."""
VERSION = "0.3.52"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.51"
|
from jina import Executor, requests
from .helper import get_doc_value
class MyExecutorToReload2(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = get_doc_value()
|
from jina import Executor, requests
from .helper import get_doc_value
class MyExecutorToReload2(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = get_doc_value()
|
import importlib
import os
import re
from pathlib import Path
from typing import Type, TypeVar
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"separated by underscores, and contain only alphabet characters"
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
T = TypeVar("T")
def all_subclasses(cls: Type[T]) -> list[Type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
for block_cls in all_subclasses(Block):
name = block_cls.__name__
if block_cls.__name__.endswith("Base"):
continue
if not block_cls.__name__.endswith("Block"):
raise ValueError(
f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
    # Make sure all fields in input_schema and output_schema are annotated and have a value
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(f"{block.name} has a boolean field with no default value")
if block.disabled:
continue
AVAILABLE_BLOCKS[block.id] = block_cls
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
|
import importlib
import os
import re
from pathlib import Path
from typing import Type, TypeVar
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"separated by underscores, and contain only alphabet characters"
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
T = TypeVar("T")
def all_subclasses(cls: Type[T]) -> list[Type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
for block_cls in all_subclasses(Block):
name = block_cls.__name__
if block_cls.__name__.endswith("Base"):
continue
if not block_cls.__name__.endswith("Block"):
raise ValueError(
f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Prevent duplicate field name in input_schema and output_schema
duplicate_field_names = set(input_schema.keys()) & set(output_schema.keys())
if duplicate_field_names:
raise ValueError(
f"{block.name} has duplicate field names in input_schema and output_schema: {duplicate_field_names}"
)
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
    # Make sure all fields in input_schema and output_schema are annotated and have a value
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(f"{block.name} has a boolean field with no default value")
if block.disabled:
continue
AVAILABLE_BLOCKS[block.id] = block_cls
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
|
_base_ = './mask-rcnn_r50_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given source and target sentences, the SparseMSEEvaluator computes the mean squared error between the student and teacher embeddings.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity Stats: Row Non-Zero Mean: 55.60933303833008, Row Sparsity Mean: 0.9981780648231506
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseMSEEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given source and target sentences, the SparseMSEEvaluator computes the mean squared error between the student and teacher embeddings.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.37"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.37"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py39
@pytest.fixture(
name="client",
params=[
"tutorial004",
pytest.param("tutorial004_py39", marks=needs_py39),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.extra_models.{request.param}")
client = TestClient(mod.app)
return client
def test_get_items(client: TestClient):
response = client.get("/items/")
assert response.status_code == 200, response.text
assert response.json() == [
{"name": "Foo", "description": "There comes my hero"},
{"name": "Red", "description": "It's my aeroplane"},
]
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Read Items Items Get",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
}
},
"summary": "Read Items",
"operationId": "read_items_items__get",
}
}
},
"components": {
"schemas": {
"Item": {
"title": "Item",
"required": ["name", "description"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
}
}
},
}
|
from fastapi.testclient import TestClient
from docs_src.extra_models.tutorial004 import app
client = TestClient(app)
def test_get_items():
response = client.get("/items/")
assert response.status_code == 200, response.text
assert response.json() == [
{"name": "Foo", "description": "There comes my hero"},
{"name": "Red", "description": "It's my aeroplane"},
]
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Read Items Items Get",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
}
},
"summary": "Read Items",
"operationId": "read_items_items__get",
}
}
},
"components": {
"schemas": {
"Item": {
"title": "Item",
"required": ["name", "description"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
}
}
},
}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
|
from typing import TYPE_CHECKING, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
T = TypeVar('T', bound='ID')
@_register_proto(proto_type_name='id')
class ID(str, AbstractType):
"""
    Represent a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert an ID into a NodeProto message. This function should
        be called when self is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text=self, type=self._proto_type_name)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
        Read an ID string from a protobuf message
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
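if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module), assuming pydantic v1
    # semantics as implied by the imports above.
    from uuid import uuid4
    doc_id = parse_obj_as(ID, uuid4())
    print(doc_id)                          # the UUID coerced to its string form
    print(ID.from_protobuf('my-doc-id'))   # parse an ID back from a plain string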
|
from typing import TYPE_CHECKING, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
T = TypeVar('T', bound='ID')
@_register_proto(proto_type_name='id')
class ID(str, AbstractType):
"""
    Represent a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert an ID into a NodeProto message. This function should
        be called when self is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text=self, type=self._proto_type_name)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
        Read an ID string from a protobuf message
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
|
"""DashVector reader."""
from typing import Dict, List, Optional
import json
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class DashVectorReader(BaseReader):
"""
DashVector reader.
Args:
api_key (str): DashVector API key.
endpoint (str): DashVector cluster endpoint.
"""
def __init__(self, api_key: str, endpoint: str):
"""Initialize with parameters."""
try:
import dashvector
except ImportError:
raise ImportError(
"`dashvector` package not found, please run `pip install dashvector`"
)
self._client: dashvector.Client = dashvector.Client(
api_key=api_key, endpoint=endpoint
)
def load_data(
self,
collection_name: str,
vector: Optional[List[float]],
topk: int,
filter: Optional[str] = None,
include_vector: bool = True,
partition: Optional[str] = None,
output_fields: Optional[List[str]] = None,
sparse_vector: Optional[Dict[int, float]] = None,
) -> List[Document]:
"""
Load data from DashVector.
Args:
collection_name (str): Name of the collection.
vector (List[float]): Query vector.
topk (int): Number of results to return.
filter (Optional[str]): doc fields filter
                conditions that meet the SQL where clause specification. Details in https://help.aliyun.com/document_detail/2513006.html?spm=a2c4g.2510250.0.0.40d25637QMI4eV
            include_vector (bool): Whether to include the embedding in the response. Defaults to True.
partition (Optional[str]): The partition name
to query. Defaults to None.
output_fields (Optional[List[str]]): The fields
to return. Defaults to None, meaning all fields
sparse_vector (Optional[Dict[int, float]]): The
                sparse vector to query. Defaults to None.
Returns:
List[Document]: A list of documents.
"""
collection = self._client.get(collection_name)
if not collection:
raise ValueError(
f"Failed to get collection: {collection_name},Error: {collection}"
)
ret = collection.query(
vector=vector,
topk=topk,
filter=filter,
include_vector=include_vector,
partition=partition,
output_fields=output_fields,
sparse_vector=sparse_vector,
)
if not ret:
raise Exception(f"Failed to query document,Error: {ret}")
doc_metas = ret.output
documents = []
for doc_meta in doc_metas:
node_content = json.loads(doc_meta.fields["_node_content"])
document = Document(
id_=doc_meta.id,
text=node_content["text"],
metadata=node_content["metadata"],
embedding=doc_meta.vector,
)
documents.append(document)
return documents
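if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); the API key, endpoint,
    # collection name and vector dimension below are placeholders.
    reader = DashVectorReader(api_key="YOUR_API_KEY", endpoint="YOUR_ENDPOINT")
    documents = reader.load_data(
        collection_name="my_collection",
        vector=[0.1] * 768,  # query embedding; dimension must match the collection
        topk=5,
    )
    print(len(documents))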
|
"""DashVector reader."""
from typing import Dict, List, Optional
import json
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class DashVectorReader(BaseReader):
"""
DashVector reader.
Args:
api_key (str): DashVector API key.
endpoint (str): DashVector cluster endpoint.
"""
def __init__(self, api_key: str, endpoint: str):
"""Initialize with parameters."""
try:
import dashvector
except ImportError:
raise ImportError(
"`dashvector` package not found, please run `pip install dashvector`"
)
self._client: dashvector.Client = dashvector.Client(
api_key=api_key, endpoint=endpoint
)
def load_data(
self,
collection_name: str,
vector: Optional[List[float]],
topk: int,
filter: Optional[str] = None,
include_vector: bool = True,
partition: Optional[str] = None,
output_fields: Optional[List[str]] = None,
sparse_vector: Optional[Dict[int, float]] = None,
) -> List[Document]:
"""
Load data from DashVector.
Args:
collection_name (str): Name of the collection.
vector (List[float]): Query vector.
topk (int): Number of results to return.
filter (Optional[str]): doc fields filter
                conditions that meet the SQL where clause specification. Details in https://help.aliyun.com/document_detail/2513006.html?spm=a2c4g.2510250.0.0.40d25637QMI4eV
            include_vector (bool): Whether to include the embedding in the response. Defaults to True.
partition (Optional[str]): The partition name
to query. Defaults to None.
output_fields (Optional[List[str]]): The fields
to return. Defaults to None, meaning all fields
sparse_vector (Optional[Dict[int, float]]): The
                sparse vector to query. Defaults to None.
Returns:
List[Document]: A list of documents.
"""
collection = self._client.get(collection_name)
if not collection:
raise ValueError(
f"Failed to get collection: {collection_name}," f"Error: {collection}"
)
ret = collection.query(
vector=vector,
topk=topk,
filter=filter,
include_vector=include_vector,
partition=partition,
output_fields=output_fields,
sparse_vector=sparse_vector,
)
if not ret:
raise Exception(f"Failed to query document," f"Error: {ret}")
doc_metas = ret.output
documents = []
for doc_meta in doc_metas:
node_content = json.loads(doc_meta.fields["_node_content"])
document = Document(
id_=doc_meta.id,
text=node_content["text"],
metadata=node_content["metadata"],
embedding=doc_meta.vector,
)
documents.append(document)
return documents
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
import os
import time
import numpy as np
import pytest
from jina import Document, DocumentArray
from .. import MongoDBStorage
NUM_DOCS = 10
@pytest.fixture
def storage():
return MongoDBStorage()
@pytest.fixture
def docs_to_index():
docu_array = DocumentArray()
for idx in range(0, NUM_DOCS):
d = Document(text=f'hello {idx}')
d.embedding = np.random.random(20)
docu_array.append(d)
return docu_array
@pytest.fixture
def docs_to_index_no_embedding():
docu_array = DocumentArray()
for idx in range(0, NUM_DOCS):
d = Document(text=f'hello {idx}')
docu_array.append(d)
return docu_array
@pytest.fixture
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up \
--build -d --remove-orphans"
)
time.sleep(5)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
|
import os
import time
import pytest
import numpy as np
from jina import Document, DocumentArray
from .. import MongoDBStorage
NUM_DOCS = 10
@pytest.fixture
def storage():
return MongoDBStorage()
@pytest.fixture
def docs_to_index():
docu_array = DocumentArray()
for idx in range(0, NUM_DOCS):
d = Document(text=f'hello {idx}')
d.embedding = np.random.random(20)
docu_array.append(d)
return docu_array
@pytest.fixture
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(5)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestRPN(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = build_detector(model)
self.assertEqual(detector.bbox_head.num_classes, 1)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
self.assertIsInstance(batch_results, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.data_elements import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestRPN(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = build_detector(model)
self.assertEqual(detector.bbox_head.num_classes, 1)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
self.assertIsInstance(batch_results, tuple)
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING:
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
A PodFactory is a factory class abstracting Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
:return: the created BasePod
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from jina.enums import PodRoleType
from jina.hubble.helper import is_valid_huburi
from jina.hubble.hubio import HubIO
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING:
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
A PodFactory is a factory class abstracting Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
:return: the created BasePod
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
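The config above ties its learning rate to a base batch size of 8 via `auto_scale_lr = dict(base_batch_size=8)`. A minimal sketch of the linear scaling rule that such auto-scaling applies, with an illustrative target batch size that is not part of the config:

```python
# Illustrative only: linear LR scaling relative to the base batch size.
base_lr = 0.01           # lr from the optimizer config above
base_batch_size = 8      # (8 GPUs) x (1 sample per GPU)
actual_batch_size = 16   # hypothetical run, e.g. 8 GPUs x 2 samples per GPU

scaled_lr = base_lr * actual_batch_size / base_batch_size
print(scaled_lr)  # 0.02
```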
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
|
from .transcribe import cli
cli()
|
from .transcribe import cli
cli()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (cat_boxes, center_of_mass, empty_instances,
filter_scores_and_topk, flip_tensor, generate_coordinate,
get_box_tensor, get_box_wh, images_to_levels,
interpolate_as, levels_to_images, mask2ndarray, multi_apply,
samplelist_boxlist2tensor, scale_boxes, select_single_mlvl,
sigmoid_geometric_mean, stack_boxes, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxlist2tensor', 'cat_boxes', 'stack_boxes', 'scale_boxes',
'get_box_tensor', 'get_box_wh'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (center_of_mass, empty_instances, filter_scores_and_topk,
flip_tensor, generate_coordinate, images_to_levels,
interpolate_as, levels_to_images, mask2ndarray, multi_apply,
samplelist_boxlist2tensor, select_single_mlvl,
sigmoid_geometric_mean, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxlist2tensor'
]
|
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> Dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
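To make the `forward` computation above concrete, here is a toy sketch that evaluates the same contrastive loss formula on hand-picked distances and labels, with no SentenceTransformer model involved:

```python
import torch
import torch.nn.functional as F

# 0.5 * (label * d^2 + (1 - label) * relu(margin - d)^2), as in ContrastiveLoss.forward
margin = 0.5
distances = torch.tensor([0.1, 0.9])  # e.g. cosine distances of two pairs
labels = torch.tensor([1.0, 0.0])     # 1 = positive pair, 0 = negative pair

losses = 0.5 * (labels * distances.pow(2) + (1 - labels) * F.relu(margin - distances).pow(2))
print(losses)         # tensor([0.0050, 0.0000]): the negative pair is already beyond the margin
print(losses.mean())  # what the loss returns with size_average=True
```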
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3
from keras.src.applications.inception_v3 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.inception_v3 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_v3 import InceptionV3
from keras.src.applications.inception_v3 import decode_predictions
from keras.src.applications.inception_v3 import preprocess_input
|
"""
This script contains an example how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder, models
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
asym = models.Asym(
{
"query": [
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
}
)
sparse_model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(
[{"doc": doc} for doc in corpus], convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode([{"query": query} for query in queries], convert_to_sparse_tensor=True)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
vocab=sparse_model.tokenizer.vocab,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder, models
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
asym = models.Asym(
{
"query": [
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
}
)
sparse_model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(
[{"doc": doc for doc in corpus}], convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode([{"query": query for query in queries}], convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2)),
mask_roi_extractor=dict(
type='GenericRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2))))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2)),
mask_roi_extractor=dict(
type='GenericRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2))))
|
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
"SparseEncoder",
]
|
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
"""Run smoke tests"""
import os
import torchvision
from torchvision.io import read_image
image_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "assets", "encode_jpeg", "grace_hopper_517x606.jpg"
)
print("torchvision version is ", torchvision.__version__)
img = read_image(image_path)
|
"""Run smoke tests"""
import torchvision
print("torchvision version is ", torchvision.__version__)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# The SparseMSEEvaluator computes the mean squared error between the student and teacher embeddings of the given sentences.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity: Active Dimensions: 55.6, Sparsity Ratio: 0.9982
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# The SparseMSEEvaluator computes the mean squared error between the student and teacher embeddings of the given sentences.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity Stats: Row Non-Zero Mean: 55.60933303833008, Row Sparsity Mean: 0.9981780648231506
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
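The "MSE (*100)" figure reported above is, conceptually, the mean squared difference between student and teacher embeddings, scaled by 100. A toy sketch with random dense tensors (the evaluator itself compares sparse embeddings of the same sentences):

```python
import torch

# Toy stand-ins for student and teacher embeddings of the same 4 sentences.
student_emb = torch.randn(4, 8)
teacher_emb = torch.randn(4, 8)

mse = ((teacher_emb - student_emb) ** 2).mean()
print(mse * 100)  # reported as "MSE (*100)" in the logs above
```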
from io import BytesIO
from typing import TYPE_CHECKING, Optional, Tuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.bytes.base_bytes import BaseBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from PIL import Image as PILImage
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(BaseBytes):
"""
Bytes that store an image and that can be loaded into an image tensor
"""
def load_pil(
self,
) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
return PILImage.open(BytesIO(self))
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> ImageNdArray:
"""
Load the image from the [`ImageBytes`][docarray.typing.ImageBytes] into an
[`ImageNdArray`][docarray.typing.ImageNdArray].
---
```python
from docarray import BaseDoc
from docarray.typing import ImageNdArray, ImageUrl
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: [`ImageNdArray`][docarray.typing.ImageNdArray] representing the image as RGB values
"""
raw_img = self.load_pil()
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
img = self._move_channel_axis(tensor, axis_layout=axis_layout)
return parse_obj_as(ImageNdArray, img)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from PIL import Image as PILImage
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
Bytes that store an image and that can be loaded into an image tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load_pil(
self,
) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
return PILImage.open(BytesIO(self))
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> ImageNdArray:
"""
Load the image from the [`ImageBytes`][docarray.typing.ImageBytes] into an
[`ImageNdArray`][docarray.typing.ImageNdArray].
---
```python
from docarray import BaseDoc
from docarray.typing import ImageNdArray, ImageUrl
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: [`ImageNdArray`][docarray.typing.ImageNdArray] representing the image as RGB values
"""
raw_img = self.load_pil()
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
img = self._move_channel_axis(tensor, axis_layout=axis_layout)
return parse_obj_as(ImageNdArray, img)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
|
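A small, self-contained sketch of the axis permutation performed by `_move_channel_axis` above, using a dummy array in place of a decoded image (the shape is illustrative):

```python
import numpy as np

# An H x W x C dummy "image" (2 x 4 x 3) reordered to the ('C', 'W', 'H') layout
# used in the load() docstring example above.
tensor = np.zeros((2, 4, 3))
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
axis_layout = ('C', 'W', 'H')

permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
print(np.transpose(tensor, permutation).shape)  # (3, 4, 2)
```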
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CachedMultipleNegativesSymmetricRankingLoss import CachedMultipleNegativesSymmetricRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .DistillKLDivLoss import DistillKLDivLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"CachedMultipleNegativesSymmetricRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
"DistillKLDivLoss",
]
|
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CachedMultipleNegativesSymmetricRankingLoss import CachedMultipleNegativesSymmetricRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"CachedMultipleNegativesSymmetricRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read it as a mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length, device=array.device))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
Load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
This allows decoupling the librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
Compute the log-Mel spectrogram of an audio waveform.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to the audio file, or a NumPy array or Tensor containing the audio waveform sampled at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[:, :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read it as a mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
Load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
This allows decoupling the librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
Compute the log-Mel spectrogram of an audio waveform.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to the audio file, or a NumPy array or Tensor containing the audio waveform sampled at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[:, :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
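A standalone sketch of the NumPy branch of `pad_or_trim` above, using a small target length instead of `N_SAMPLES` so the padding is easy to see:

```python
import numpy as np

# Pad the last axis of a short array up to a fixed length, as pad_or_trim does.
length = 8
array = np.ones((2, 5))  # shorter than `length` along axis=-1

pad_widths = [(0, 0)] * array.ndim
pad_widths[-1] = (0, length - array.shape[-1])
padded = np.pad(array, pad_widths)
print(padded.shape)  # (2, 8)
```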
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9070
Model Query Sparsity: Active Dimensions: 59.4, Sparsity Ratio: 0.9981
Model Corpus Sparsity: Active Dimensions: 61.9, Sparsity Ratio: 0.9980
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 86.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.60%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 86.00%
MRR@10: 0.6191
NDCG@10: 0.6780
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 45.4, Sparsity Ratio: 0.9985
Model Corpus Sparsity: Active Dimensions: 122.6, Sparsity Ratio: 0.9960
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 93.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 10.90%
Recall@10: 92.13%
MRR@10: 0.7815
NDCG@10: 0.8060
Model Query Sparsity: Active Dimensions: 52.4, Sparsity Ratio: 0.9983
Model Corpus Sparsity: Active Dimensions: 92.2, Sparsity Ratio: 0.9970
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8060
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Query Sparsity: Active Dimensions: 63.0, Sparsity Ratio: 0.9979
Model Corpus Sparsity: Active Dimensions: 63.4, Sparsity Ratio: 0.9979
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 48.1, Sparsity Ratio: 0.9984
Model Corpus Sparsity: Active Dimensions: 125.4, Sparsity Ratio: 0.9959
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Query Sparsity: Active Dimensions: 55.5, Sparsity Ratio: 0.9982
Model Corpus Sparsity: Active Dimensions: 94.4, Sparsity Ratio: 0.9969
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert'
]
|
"""
S3 file and directory reader.
A loader that fetches a file or iterates through a directory on AWS S3 or other compatible service.
"""
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.opendal.base import OpendalReader
class OpendalS3Reader(BaseReader):
"""General reader for any S3 file or directory."""
def __init__(
self,
bucket: str,
path: str = "/",
endpoint: str = "",
region: str = "",
access_key_id: str = "",
secret_access_key: str = "",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
) -> None:
"""
Initialize S3 bucket and key, along with credentials if needed.
        If path is not set, the entire bucket (filtered by prefix) is parsed.
Args:
bucket (str): the name of your S3 bucket
            path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If path ends with `/`, this loader will iterate through the entire directory. Otherwise, this loader will load the file.
            endpoint (Optional[str]): the endpoint of the S3 service.
            region (Optional[str]): the region of the S3 service.
            access_key_id (Optional[str]): provide AWS access key directly.
            secret_access_key (Optional[str]): provide AWS secret access key directly.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
super().__init__()
self.path = path
self.file_extractor = file_extractor
# opendal service related config.
self.options = {
"access_key": access_key_id,
"secret_key": secret_access_key,
"endpoint": endpoint,
"region": region,
"bucket": bucket,
}
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
loader = OpendalReader(
scheme="s3",
path=self.path,
file_extractor=self.file_extractor,
**self.options,
)
return loader.load_data()
|
"""S3 file and directory reader.
A loader that fetches a file or iterates through a directory on AWS S3 or other compatible service.
"""
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.opendal.base import OpendalReader
class OpendalS3Reader(BaseReader):
"""General reader for any S3 file or directory."""
def __init__(
self,
bucket: str,
path: str = "/",
endpoint: str = "",
region: str = "",
access_key_id: str = "",
secret_access_key: str = "",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
) -> None:
"""Initialize S3 bucket and key, along with credentials if needed.
        If path is not set, the entire bucket (filtered by prefix) is parsed.
Args:
bucket (str): the name of your S3 bucket
            path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If path ends with `/`, this loader will iterate through the entire directory. Otherwise, this loader will load the file.
            endpoint (Optional[str]): the endpoint of the S3 service.
            region (Optional[str]): the region of the S3 service.
            access_key_id (Optional[str]): provide AWS access key directly.
            secret_access_key (Optional[str]): provide AWS secret access key directly.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
super().__init__()
self.path = path
self.file_extractor = file_extractor
# opendal service related config.
self.options = {
"access_key": access_key_id,
"secret_key": secret_access_key,
"endpoint": endpoint,
"region": region,
"bucket": bucket,
}
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
loader = OpendalReader(
scheme="s3",
path=self.path,
file_extractor=self.file_extractor,
**self.options,
)
return loader.load_data()
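# A hedged usage sketch for the reader defined above; the bucket, endpoint and
# credentials are placeholders, and loading requires the `opendal` package to be
# installed:
reader = OpendalS3Reader(
    bucket="my-bucket",
    path="reports/",  # trailing "/" means the whole directory is iterated
    endpoint="https://s3.us-east-1.amazonaws.com",
    access_key_id="YOUR_ACCESS_KEY_ID",
    secret_access_key="YOUR_SECRET_ACCESS_KEY",
)
documents = reader.load_data()
print(len(documents))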
|
from docarray import DocList, BaseDoc
from docarray.documents.text import TextDoc
from jina import Executor, requests, Flow
def test_issue():
class QuoteFile(BaseDoc):
quote_file_id: int = None
texts: DocList[TextDoc] = None
class SearchResult(BaseDoc):
results: DocList[QuoteFile] = None
class InitialExecutor(Executor):
@requests(on='/search')
async def search(
self, docs: DocList[SearchResult], **kwargs
) -> DocList[SearchResult]:
return docs
f = Flow(protocol='http').add(name='initial', uses=InitialExecutor)
with f:
resp = f.post(
on='/search',
inputs=DocList[SearchResult](
[
SearchResult(
results=DocList[QuoteFile](
[
QuoteFile(
quote_file_id=999,
texts=DocList[TextDoc]([TextDoc(text='hey here')]),
)
]
)
)
]
),
return_type=DocList[SearchResult],
)
assert resp[0].results[0].quote_file_id == 999
assert resp[0].results[0].texts[0].text == 'hey here'
|
from docarray import DocList, BaseDoc
from docarray.documents.text import TextDoc
from jina import Executor, requests, Flow
def test_issue():
class QuoteFile(BaseDoc):
quote_file_id: int = None
texts: DocList[TextDoc] = None
class SearchResult(BaseDoc):
results: DocList[QuoteFile] = None
class InitialExecutor(Executor):
@requests(on='/search')
async def search(self, docs: DocList[SearchResult], **kwargs) -> DocList[SearchResult]:
return docs
f = (
Flow(protocol='http')
.add(name='initial', uses=InitialExecutor)
)
with f:
resp = f.post(on='/search', inputs=DocList[SearchResult]([SearchResult(results=DocList[QuoteFile](
[QuoteFile(quote_file_id=999, texts=DocList[TextDoc]([TextDoc(text='hey here')]))]))]),
return_type=DocList[SearchResult])
assert resp[0].results[0].quote_file_id == 999
assert resp[0].results[0].texts[0].text == 'hey here'
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from docarray import BaseDoc, DocList
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(BaseDoc):
text: str
tensor: Optional[TorchTensor[3, 224, 224]] = None
N = 10
batch = DocList[Mmdoc](Mmdoc(text=f'hello{i}') for i in range(N))
batch.tensor = torch.zeros(N, 3, 224, 224)
batch = batch.to_doc_vec()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model = Model()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for _ in range(2):
loss = model(batch.tensor).sum()
loss.backward()
opt.step()
|
from typing import Optional
import torch
from docarray import BaseDoc, DocList
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(BaseDoc):
text: str
tensor: Optional[TorchTensor[3, 224, 224]] = None
N = 10
batch = DocList[Mmdoc](Mmdoc(text=f'hello{i}') for i in range(N))
batch.tensor = torch.zeros(N, 3, 224, 224)
batch = batch.to_doc_vec()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model = Model()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for _ in range(2):
loss = model(batch.tensor).sum()
loss.backward()
opt.step()
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
def check_matplotlib_support(caller_name):
"""Raise ImportError with detailed error message if mpl is not installed.
Plot utilities like any of the Display's plotting functions should lazily import
matplotlib and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires matplotlib.
"""
try:
import matplotlib # noqa
except ImportError as e:
raise ImportError(
"{} requires matplotlib. You can install matplotlib with "
"`pip install matplotlib`".format(caller_name)
) from e
def check_pandas_support(caller_name):
"""Raise ImportError with detailed error message if pandas is not installed.
Plot utilities like :func:`fetch_openml` should lazily import
pandas and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires pandas.
Returns
-------
pandas
The pandas package.
"""
try:
import pandas
return pandas
except ImportError as e:
raise ImportError("{} requires pandas.".format(caller_name)) from e
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
def check_matplotlib_support(caller_name):
"""Raise ImportError with detailed error message if mpl is not installed.
Plot utilities like any of the Display's plotting functions should lazily import
matplotlib and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires matplotlib.
"""
try:
import matplotlib # noqa
except ImportError as e:
raise ImportError(
"{} requires matplotlib. You can install matplotlib with "
"`pip install matplotlib`".format(caller_name)
) from e
def check_pandas_support(caller_name):
"""Raise ImportError with detailed error message if pandas is not installed.
Plot utilities like :func:`fetch_openml` should lazily import
pandas and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires pandas.
Returns
-------
pandas
The pandas package.
"""
try:
import pandas # noqa
return pandas
except ImportError as e:
raise ImportError("{} requires pandas.".format(caller_name)) from e
|
_base_ = './ga-retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './ga_retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'DABDETRHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead'
]
|
import pytest
from ldclient import LDClient
from autogpt_libs.feature_flag.client import feature_flag, mock_flag_variation
@pytest.fixture
def ld_client(mocker):
client = mocker.Mock(spec=LDClient)
mocker.patch("ldclient.get", return_value=client)
client.is_initialized.return_value = True
return client
@pytest.mark.asyncio
async def test_feature_flag_enabled(ld_client):
ld_client.variation.return_value = True
@feature_flag("test-flag")
async def test_function(user_id: str):
return "success"
result = test_function(user_id="test-user")
assert result == "success"
ld_client.variation.assert_called_once()
@pytest.mark.asyncio
async def test_feature_flag_unauthorized_response(ld_client):
ld_client.variation.return_value = False
@feature_flag("test-flag")
async def test_function(user_id: str):
return "success"
result = test_function(user_id="test-user")
assert result == {"error": "disabled"}
def test_mock_flag_variation(ld_client):
with mock_flag_variation("test-flag", True):
assert ld_client.variation("test-flag", None, False)
with mock_flag_variation("test-flag", False):
assert ld_client.variation("test-flag", None, False)
|
import pytest
from autogpt_libs.feature_flag.client import feature_flag, mock_flag_variation
from ldclient import LDClient
@pytest.fixture
def ld_client(mocker):
client = mocker.Mock(spec=LDClient)
mocker.patch("ldclient.get", return_value=client)
client.is_initialized.return_value = True
return client
@pytest.mark.asyncio
async def test_feature_flag_enabled(ld_client):
ld_client.variation.return_value = True
@feature_flag("test-flag")
async def test_function(user_id: str):
return "success"
result = test_function(user_id="test-user")
assert result == "success"
ld_client.variation.assert_called_once()
@pytest.mark.asyncio
async def test_feature_flag_unauthorized_response(ld_client):
ld_client.variation.return_value = False
@feature_flag("test-flag")
async def test_function(user_id: str):
return "success"
result = test_function(user_id="test-user")
assert result == {"error": "disabled"}
def test_mock_flag_variation(ld_client):
with mock_flag_variation("test-flag", True):
assert ld_client.variation("test-flag", None, False)
with mock_flag_variation("test-flag", False):
assert ld_client.variation("test-flag", None, False)
|
"""Defines utilities for switching audio backends"""
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if torchaudio._extension._SOX_INITIALIZED:
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
|
"""Defines utilities for switching audio backends"""
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if _mod_utils.is_sox_available():
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
|
"""Experiment with different models."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[list[str]] = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains (Sequence[Chain]): A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names (Optional[List[str]]): Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
msg = (
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
raise ValueError(msg)
if len(chain.input_keys) != 1:
msg = (
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
raise ValueError(msg)
if len(chain.output_keys) != 1:
msg = (
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
raise ValueError(msg)
if names is not None:
if len(names) != len(chains):
msg = "Length of chains does not match length of names."
raise ValueError(msg)
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms (List[BaseLLM]): A list of LLMs to experiment with.
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
        If a prompt was provided when starting the laboratory, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
|
"""Experiment with different models."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[list[str]] = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains (Sequence[Chain]): A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names (Optional[List[str]]): Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
raise ValueError(
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
if len(chain.input_keys) != 1:
raise ValueError(
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
if len(chain.output_keys) != 1:
raise ValueError(
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
if names is not None:
if len(names) != len(chains):
raise ValueError("Length of chains does not match length of names.")
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms (List[BaseLLM]): A list of LLMs to experiment with.
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
        If a prompt was provided when starting the laboratory, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
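# A short usage sketch for the class defined above, assuming `langchain_community`
# is installed; FakeListLLM stands in for real models so the comparison runs
# without API keys:
from langchain_community.llms import FakeListLLM
llms = [
    FakeListLLM(responses=["Paris"]),
    FakeListLLM(responses=["The capital of France is Paris."]),
]
lab = ModelLaboratory.from_llms(llms)
lab.compare("What is the capital of France?")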
|
import gzip
import os
from . import InputExample
class NLIDataReader(object):
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        The filename specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
|
from . import InputExample
import gzip
import os
class NLIDataReader(object):
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        The filename specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
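# A hedged usage sketch; "AllNLI" is a placeholder folder assumed to contain the
# gzipped files described above (s1.train.gz, s2.train.gz, labels.train.gz):
reader = NLIDataReader("AllNLI")
train_examples = reader.get_examples("train.gz", max_examples=1000)
print(len(train_examples), "examples,", reader.get_num_labels(), "labels")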
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 128; with the in-batch negatives loss, the other examples in a batch serve as negatives
# Sentences are truncated to 75 word pieces
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-improved-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=1,
warmup_steps=100,
use_amp=True, # Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample
from sentence_transformers import models, util, evaluation, losses
import logging
import os
import gzip
from datetime import datetime
import torch
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Some training parameters. We use a batch size of 128; with the in-batch negatives loss, the other examples in a batch serve as negatives
# Sentences are truncated to 75 word pieces
## Training parameters
model_name = 'distilbert-base-uncased'
batch_size = 128
epochs = 1
max_seq_length = 75
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = 'askubuntu'
output_path = 'output/train_askubuntu_ct-improved-{}-{}-{}'.format(model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ['text_tokenized.txt.gz', 'dev.txt', 'test.txt', 'train_random.txt']:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get('https://github.com/taolei87/askubuntu/raw/master/'+filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, 'text_tokenized.txt.gz'), 'rt', encoding='utf8') as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: #Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append({
'query': corpus[query_id],
'positive': [corpus[pid] for pid in relevant_id],
'negative': [corpus[pid] for pid in negative_ids]
})
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'dev.txt'))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'test.txt'))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name='AskUbuntu dev')
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name='AskUbuntu test')
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
warmup_steps=100,
use_amp=True #Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from mmdet.registry import MODELS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@MODELS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels
num_residual_blocks (int): The number of residual blocks.
block_dilations (list): The list of residual blocks dilation.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks, block_dilations):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = block_dilations
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from ..builder import NECKS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@NECKS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels
num_residual_blocks (int): The number of residual blocks.
block_dilations (list): The list of residual blocks dilation.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks, block_dilations):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = block_dilations
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
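# A minimal forward-pass sketch for the encoder defined above, assuming torch and
# mmcv are available; the channel sizes are illustrative rather than values
# mandated by YOLOF:
import torch
encoder = DilatedEncoder(
    in_channels=2048,
    out_channels=512,
    block_mid_channels=128,
    num_residual_blocks=4,
    block_dilations=[2, 4, 6, 8])
encoder.init_weights()
feats = (torch.randn(2, 2048, 16, 16),)  # forward() only reads feature[-1]
out, = encoder(feats)
print(out.shape)  # torch.Size([2, 512, 16, 16])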
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
In particular, you can pass "no_duplicates" to `batch_sampler` in the `SentenceTransformerTrainingArguments` class.
"""
from __future__ import annotations
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
The data loader ensures that there are no duplicate sentences within the same batch
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
|
from __future__ import annotations
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
The data loader ensures that there are no duplicate sentences within the same batch
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
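# A short usage sketch; the InputExample pairs are made up for illustration and
# assume sentence_transformers is installed:
from sentence_transformers import InputExample
examples = [
    InputExample(texts=["A man is eating food.", "A man eats something."]),
    InputExample(texts=["A man is eating food.", "Someone plays a guitar."]),
    InputExample(texts=["A child is playing.", "A kid plays outside."]),
]
loader = NoDuplicatesDataLoader(examples, batch_size=2)
first_batch = next(iter(loader))
# The second example repeats "A man is eating food.", so it is skipped for this batch.
print(len(first_batch))  # 2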
|
"""Test OCI Generative AI embedding service."""
from unittest.mock import MagicMock
from typing import Any
import pytest
from pytest import MonkeyPatch
from llama_index.embeddings.oci_genai import OCIGenAIEmbeddings
class MockResponseDict(dict):
def __getattr__(self, val) -> Any: # type: ignore[no-untyped-def]
return self[val]
@pytest.mark.parametrize(
"test_model_id", ["cohere.embed-english-light-v3.0", "cohere.embed-english-v3.0"]
)
def test_embedding_call(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
"""Test valid call to OCI Generative AI embedding service."""
oci_gen_ai_client = MagicMock()
embedding = OCIGenAIEmbeddings(
model_name=test_model_id,
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
client=oci_gen_ai_client,
)
def mocked_response(invocation_obj): # type: ignore[no-untyped-def]
docs = invocation_obj.inputs
embeddings = []
for d in docs:
if "Hello" in d:
v = [1.0, 0.0, 0.0]
elif "World" in d:
v = [0.0, 1.0, 0.0]
else:
v = [0.0, 0.0, 1.0]
embeddings.append(v)
return MockResponseDict(
{"status": 200, "data": MockResponseDict({"embeddings": embeddings})}
)
monkeypatch.setattr(embedding._client, "embed_text", mocked_response)
output = embedding.get_text_embedding_batch(["Hello", "World"])
correct_output = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
assert output == correct_output
|
"""Test OCI Generative AI embedding service."""
from unittest.mock import MagicMock
from typing import Any
import pytest
from pytest import MonkeyPatch
from llama_index.embeddings.oci_genai import OCIGenAIEmbeddings
class MockResponseDict(dict):
def __getattr__(self, val) -> Any: # type: ignore[no-untyped-def]
return self[val]
@pytest.mark.parametrize(
"test_model_id", ["cohere.embed-english-light-v3.0", "cohere.embed-english-v3.0"]
)
def test_embedding_call(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
"""Test valid call to OCI Generative AI embedding service."""
oci_gen_ai_client = MagicMock()
embedding = OCIGenAIEmbeddings(
model_name=test_model_id,
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
client=oci_gen_ai_client,
)
def mocked_response(invocation_obj): # type: ignore[no-untyped-def]
docs = invocation_obj.inputs
embeddings = []
for d in docs:
if "Hello" in d:
v = [1.0, 0.0, 0.0]
elif "World" in d:
v = [0.0, 1.0, 0.0]
else:
v = [0.0, 0.0, 1.0]
embeddings.append(v)
return MockResponseDict(
{"status": 200, "data": MockResponseDict({"embeddings": embeddings})}
)
monkeypatch.setattr(embedding._client, "embed_text", mocked_response)
output = embedding.get_text_embedding_batch(["Hello", "World"])
correct_output = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
assert output == correct_output
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type={{_base_.dataset_type}},
data_root={{_base_.data_root}},
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=8,
num_workers=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
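# A hedged sketch of how such a config is typically consumed in MMDetection 3.x;
# the file path is a placeholder and resolving `_base_` requires the referenced
# base config files to be present:
from mmengine.config import Config
cfg = Config.fromfile("configs/ssd/ssd300_coco.py")
print(cfg.train_dataloader.batch_size)   # 8
print(cfg.optim_wrapper.optimizer.lr)    # 0.002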
|
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instances
from sklearn.utils._testing import SkipTest
class AllowNanEstimators(Directive):
@staticmethod
def make_paragraph_for_estimator_type(estimator_type):
intro = nodes.list_item()
intro += nodes.strong(text="Estimators that allow NaN values for type ")
intro += nodes.literal(text=f"{estimator_type}")
intro += nodes.strong(text=":\n")
exists = False
lst = nodes.bullet_list()
for name, est_class in all_estimators(type_filter=estimator_type):
with suppress(SkipTest):
# Here we generate the text only for one instance. This directive
# should not be used for meta-estimators where tags depend on the
# sub-estimator.
est = next(_construct_instances(est_class))
if est.__sklearn_tags__().input_tags.allow_nan:
module_name = ".".join(est_class.__module__.split(".")[:2])
class_title = f"{est_class.__name__}"
class_url = f"./generated/{module_name}.{class_title}.html"
item = nodes.list_item()
para = nodes.paragraph()
para += nodes.reference(
class_title, text=class_title, internal=False, refuri=class_url
)
exists = True
item += para
lst += item
intro += lst
return [intro] if exists else None
def run(self):
lst = nodes.bullet_list()
for i in ["cluster", "regressor", "classifier", "transformer"]:
item = self.make_paragraph_for_estimator_type(i)
if item is not None:
lst += item
return [lst]
def setup(app):
app.add_directive("allow_nan_estimators", AllowNanEstimators)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instance
from sklearn.utils._testing import SkipTest
class AllowNanEstimators(Directive):
@staticmethod
def make_paragraph_for_estimator_type(estimator_type):
intro = nodes.list_item()
intro += nodes.strong(text="Estimators that allow NaN values for type ")
intro += nodes.literal(text=f"{estimator_type}")
intro += nodes.strong(text=":\n")
exists = False
lst = nodes.bullet_list()
for name, est_class in all_estimators(type_filter=estimator_type):
with suppress(SkipTest):
est = _construct_instance(est_class)
if est.__sklearn_tags__().input_tags.allow_nan:
module_name = ".".join(est_class.__module__.split(".")[:2])
class_title = f"{est_class.__name__}"
class_url = f"./generated/{module_name}.{class_title}.html"
item = nodes.list_item()
para = nodes.paragraph()
para += nodes.reference(
class_title, text=class_title, internal=False, refuri=class_url
)
exists = True
item += para
lst += item
intro += lst
return [intro] if exists else None
def run(self):
lst = nodes.bullet_list()
for i in ["cluster", "regressor", "classifier", "transformer"]:
item = self.make_paragraph_for_estimator_type(i)
if item is not None:
lst += item
return [lst]
def setup(app):
app.add_directive("allow_nan_estimators", AllowNanEstimators)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
import pytest
from unittest.mock import Mock, patch, AsyncMock
from llama_index.embeddings.nvidia import NVIDIAEmbedding
class MockEmbeddingResponse:
"""Mock response matching the structure expected by the code."""
def __init__(self):
self.data = [Mock(embedding=[1.0, 2.0, 3.0], index=0)]
@pytest.fixture(autouse=True)
def mock_openai():
"""Set up mock for OpenAI client."""
# Create mock response
mock_response = MockEmbeddingResponse()
# Patch at the module level where the code imports from
# NVIDIAEmbedding uses: from openai import OpenAI, AsyncOpenAI
with (
patch("llama_index.embeddings.nvidia.base.OpenAI") as mock_openai_cls,
patch(
"llama_index.embeddings.nvidia.base.AsyncOpenAI"
) as mock_async_openai_cls,
):
# Set up the sync client mock
mock_client = Mock()
mock_embeddings = Mock()
mock_embeddings.create.return_value = mock_response
mock_client.embeddings = mock_embeddings
mock_openai_cls.return_value = mock_client
# Set up the async client mock properly
mock_aclient = Mock()
mock_aembeddings = Mock()
# Use AsyncMock for the create method to make it awaitable
mock_aembeddings.create = AsyncMock(return_value=mock_response)
mock_aclient.embeddings = mock_aembeddings
mock_async_openai_cls.return_value = mock_aclient
yield mock_client, mock_aclient
@pytest.mark.parametrize("method_name", ["get_query_embedding", "get_text_embedding"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
def test_single_truncate(method_name: str, truncate: str):
# Call the method
getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)("nvidia")
@pytest.mark.parametrize("method_name", ["aget_query_embedding", "aget_text_embedding"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
@pytest.mark.asyncio
async def test_asingle_truncate(method_name: str, truncate: str):
# Call the method
await getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(
"nvidia"
)
@pytest.mark.parametrize("method_name", ["get_text_embedding_batch"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
def test_batch_truncate(method_name: str, truncate: str):
# Call the method
getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(
["nvidia"]
)
@pytest.mark.parametrize("method_name", ["aget_text_embedding_batch"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
@pytest.mark.asyncio
async def test_abatch_truncate(method_name: str, truncate: str):
# Call the method
await getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(
["nvidia"]
)
|
import pytest
from unittest.mock import Mock, patch, AsyncMock
from llama_index.embeddings.nvidia import NVIDIAEmbedding
class MockEmbeddingResponse:
"""Mock response matching the structure expected by the code."""
def __init__(self):
self.data = [
Mock(embedding=[1.0, 2.0, 3.0], index=0)
]
@pytest.fixture(autouse=True)
def mock_openai():
"""Set up mock for OpenAI client."""
# Create mock response
mock_response = MockEmbeddingResponse()
# Patch at the module level where the code imports from
# NVIDIAEmbedding uses: from openai import OpenAI, AsyncOpenAI
with patch("llama_index.embeddings.nvidia.base.OpenAI") as mock_openai_cls, \
patch("llama_index.embeddings.nvidia.base.AsyncOpenAI") as mock_async_openai_cls:
# Set up the sync client mock
mock_client = Mock()
mock_embeddings = Mock()
mock_embeddings.create.return_value = mock_response
mock_client.embeddings = mock_embeddings
mock_openai_cls.return_value = mock_client
# Set up the async client mock properly
mock_aclient = Mock()
mock_aembeddings = Mock()
# Use AsyncMock for the create method to make it awaitable
mock_aembeddings.create = AsyncMock(return_value=mock_response)
mock_aclient.embeddings = mock_aembeddings
mock_async_openai_cls.return_value = mock_aclient
yield mock_client, mock_aclient
@pytest.mark.parametrize("method_name", ["get_query_embedding", "get_text_embedding"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
def test_single_truncate(method_name: str, truncate: str):
# Call the method
getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)("nvidia")
@pytest.mark.parametrize("method_name", ["aget_query_embedding", "aget_text_embedding"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
@pytest.mark.asyncio
async def test_asingle_truncate(method_name: str, truncate: str):
# Call the method
await getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)("nvidia")
@pytest.mark.parametrize("method_name", ["get_text_embedding_batch"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
def test_batch_truncate(method_name: str, truncate: str):
# Call the method
getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(["nvidia"])
@pytest.mark.parametrize("method_name", ["aget_text_embedding_batch"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
@pytest.mark.asyncio
async def test_abatch_truncate(method_name: str, truncate: str):
# Call the method
await getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(["nvidia"])
|
"""Xgboost pyspark integration submodule for params."""
from typing import Dict
from pyspark.ml.param import TypeConverters
from pyspark.ml.param.shared import Param, Params
class HasArbitraryParamsDict(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the **kwargs parts of the XGBoost
input.
"""
arbitrary_params_dict: "Param[Dict]" = Param(
Params._dummy(),
"arbitrary_params_dict",
"arbitrary_params_dict This parameter holds all of the additional parameters which are "
"not exposed as the XGBoost Spark estimator params but can be recognized by "
"underlying XGBoost library. It is stored as a dictionary.",
)
class HasBaseMarginCol(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
    and holds the variable to store the base margin column part of XGBoost.
"""
base_margin_col = Param(
Params._dummy(),
"base_margin_col",
"This stores the name for the column of the base margin",
typeConverter=TypeConverters.toString,
)
class HasFeaturesCols(Params):
"""
Mixin for param features_cols: a list of feature column names.
    This parameter takes effect only when use_gpu is enabled.
"""
features_cols = Param(
Params._dummy(),
"features_cols",
"feature column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(features_cols=[])
class HasEnableSparseDataOptim(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the boolean config of enabling sparse data optimization.
"""
enable_sparse_data_optim = Param(
Params._dummy(),
"enable_sparse_data_optim",
"This stores the boolean config of enabling sparse data optimization, if enabled, "
"Xgboost DMatrix object will be constructed from sparse matrix instead of "
"dense matrix. This config is disabled by default. If most of examples in your "
"training dataset contains sparse features, we suggest to enable this config.",
typeConverter=TypeConverters.toBoolean,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(enable_sparse_data_optim=False)
class HasQueryIdCol(Params):
"""
Mixin for param qid_col: query id column name.
"""
qid_col = Param(
Params._dummy(),
"qid_col",
"query id column name",
typeConverter=TypeConverters.toString,
)
class HasContribPredictionCol(Params):
"""
Mixin for param pred_contrib_col: contribution prediction column name.
    Output is a 3-dim array, with (rows, groups, columns + 1) for the classification case.
    Otherwise, it is a 2-dim array for the regression case.
"""
pred_contrib_col: "Param[str]" = Param(
Params._dummy(),
"pred_contrib_col",
"feature contributions to individual predictions.",
typeConverter=TypeConverters.toString,
)
|
"""Xgboost pyspark integration submodule for params."""
from typing import Dict
# pylint: disable=too-few-public-methods
from pyspark.ml.param import TypeConverters
from pyspark.ml.param.shared import Param, Params
class HasArbitraryParamsDict(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the **kwargs parts of the XGBoost
input.
"""
arbitrary_params_dict: "Param[Dict]" = Param(
Params._dummy(),
"arbitrary_params_dict",
"arbitrary_params_dict This parameter holds all of the additional parameters which are "
"not exposed as the XGBoost Spark estimator params but can be recognized by "
"underlying XGBoost library. It is stored as a dictionary.",
)
class HasBaseMarginCol(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
    and holds the variable to store the base margin column part of XGBoost.
"""
base_margin_col = Param(
Params._dummy(),
"base_margin_col",
"This stores the name for the column of the base margin",
typeConverter=TypeConverters.toString,
)
class HasFeaturesCols(Params):
"""
Mixin for param features_cols: a list of feature column names.
    This parameter takes effect only when use_gpu is enabled.
"""
features_cols = Param(
Params._dummy(),
"features_cols",
"feature column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(features_cols=[])
class HasEnableSparseDataOptim(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the boolean config of enabling sparse data optimization.
"""
enable_sparse_data_optim = Param(
Params._dummy(),
"enable_sparse_data_optim",
"This stores the boolean config of enabling sparse data optimization, if enabled, "
"Xgboost DMatrix object will be constructed from sparse matrix instead of "
"dense matrix. This config is disabled by default. If most of examples in your "
"training dataset contains sparse features, we suggest to enable this config.",
typeConverter=TypeConverters.toBoolean,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(enable_sparse_data_optim=False)
class HasQueryIdCol(Params):
"""
Mixin for param qid_col: query id column name.
"""
qid_col = Param(
Params._dummy(),
"qid_col",
"query id column name",
typeConverter=TypeConverters.toString,
)
class HasContribPredictionCol(Params):
"""
Mixin for param pred_contrib_col: contribution prediction column name.
    Output is a 3-dim array, with (rows, groups, columns + 1) for the classification case.
    Otherwise, it is a 2-dim array for the regression case.
"""
pred_contrib_col: "Param[str]" = Param(
Params._dummy(),
"pred_contrib_col",
"feature contributions to individual predictions.",
typeConverter=TypeConverters.toString,
)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity: Active Dimensions: 63.1, Sparsity Ratio: 0.9979
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity Stats: Row Non-Zero Mean: 63.13884735107422, Row Sparsity Mean: 0.9979313611984253
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
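    # `b` is chosen so that alpha * log(b + 1) == gamma, which makes the log branch
    # (diff < beta) and the linear branch below take the same value at diff == beta,
    # keeping the piecewise loss continuous (see the Libra R-CNN paper).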
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Args:
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
beta (float, optional): The loss is a piecewise function of prediction
and target. ``beta`` serves as a threshold for the difference
between the prediction and target. Defaults to 1.0.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
weight (torch.Tensor, optional): Sample-wise loss weight with
shape (N, ).
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
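    # `b` is chosen so that alpha * log(b + 1) == gamma, which makes the log branch
    # (diff < beta) and the linear branch below take the same value at diff == beta,
    # keeping the piecewise loss continuous (see the Libra R-CNN paper).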
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Args:
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
beta (float, optional): The loss is a piecewise function of prediction
and target. ``beta`` serves as a threshold for the difference
between the prediction and target. Defaults to 1.0.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
weight (torch.Tensor, optional): Sample-wise loss weight with
shape (N, ).
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray.documents import Audio
from docarray.typing import AudioUrl
from docarray.typing.tensor.audio import AudioNdArray, AudioTorchTensor
from tests import TOYDATA_DIR
LOCAL_AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_audio(file_url):
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_save_audio_ndarray(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
assert isinstance(audio.tensor, AudioNdArray)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = audio_from_file.url.load()
assert np.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_save_audio_torch_tensor(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = parse_obj_as(AudioTorchTensor, torch.from_numpy(audio.url.load()))
assert isinstance(audio.tensor, torch.Tensor)
assert isinstance(audio.tensor, AudioTorchTensor)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = parse_obj_as(
AudioTorchTensor, torch.from_numpy(audio_from_file.url.load())
)
assert torch.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_extend_audio(file_url):
class MyAudio(Audio):
title: str
tensor: Optional[AudioNdArray]
my_audio = MyAudio(title='my extended audio', url=file_url)
my_audio.tensor = parse_obj_as(AudioNdArray, my_audio.url.load())
assert isinstance(my_audio.tensor, AudioNdArray)
assert isinstance(my_audio.url, AudioUrl)
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray.documents import Audio
from docarray.typing import AudioUrl
from docarray.typing.tensor.audio import AudioNdArray, AudioTorchTensor
from tests import TOYDATA_DIR
LOCAL_AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_audio(file_url):
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_save_audio_ndarray(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = parse_obj_as(AudioNdArray, audio.url.load())
assert isinstance(audio.tensor, np.ndarray)
assert isinstance(audio.tensor, AudioNdArray)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = audio_from_file.url.load()
assert np.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE])
def test_save_audio_torch_tensor(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = parse_obj_as(AudioTorchTensor, torch.from_numpy(audio.url.load()))
assert isinstance(audio.tensor, torch.Tensor)
assert isinstance(audio.tensor, AudioTorchTensor)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = parse_obj_as(
AudioTorchTensor, torch.from_numpy(audio_from_file.url.load())
)
assert torch.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*LOCAL_AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_extend_audio(file_url):
class MyAudio(Audio):
title: str
tensor: Optional[AudioNdArray]
my_audio = MyAudio(title='my extended audio', url=file_url)
my_audio.tensor = parse_obj_as(AudioNdArray, my_audio.url.load())
assert isinstance(my_audio.tensor, AudioNdArray)
assert isinstance(my_audio.url, AudioUrl)
|
import importlib
from typing import List
import fsspec
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
fsspec.register_implementation(fs_class.protocol, fs_class)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
|
import importlib
from typing import List
import fsspec
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
fsspec.register_implementation(fs_class.protocol, fs_class)
def extract_path_from_uri(dataset_path: str) -> str:
"""
preprocesses `dataset_path` and removes remote filesystem (e.g. removing ``s3://``)
Args:
dataset_path (``str``): path (e.g. ``dataset/train``) or remote uri (e.g. ``s3://my-bucket/dataset/train``) of the dataset directory
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
fs (``fsspec.spec.AbstractFileSystem``): An abstract super-class for pythonic file-systems, e.g. :code:`fsspec.filesystem(\'file\')` or :class:`datasets.filesystems.S3FileSystem`
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import (convert_box_type, get_box_type, register_box,
register_box_converter)
from .horizontal_boxes import HorizontalBoxes
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, corner2bbox, distance2bbox,
find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'find_inside_bboxes', 'bbox2corner', 'corner2bbox', 'bbox_project',
'BaseBoxes', 'convert_box_type', 'get_box_type', 'register_box',
'register_box_converter', 'HorizontalBoxes'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, corner2bbox, distance2bbox,
find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps',
'bbox_flip',
'bbox_mapping',
'bbox_mapping_back',
'bbox2roi',
'roi2bbox',
'bbox2result',
'distance2bbox',
'bbox2distance',
'bbox_rescale',
'bbox_cxcywh_to_xyxy',
'bbox_xyxy_to_cxcywh',
'find_inside_bboxes',
'bbox2corner',
'corner2bbox',
'bbox_project',
]
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.executor.utils import add_graph_execution_async
from backend.server.external.middleware import require_permission
from backend.util.settings import Settings
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
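    # Collect {name: value} pairs from node results that expose both an "output"
    # value and a "name" entry in their output_data.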
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
async def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = await add_graph_execution_async(
graph_id=graph_id,
user_id=api_key.user_id,
inputs=node_input,
graph_version=graph_version,
)
return {"id": graph_exec.id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_executions(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.executor.utils import add_graph_execution_async
from backend.server.external.middleware import require_permission
from backend.util.settings import Settings
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
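    # Collect {name: value} pairs from node results that expose both an "output"
    # value and a "name" entry in their output_data.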
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
async def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = await add_graph_execution_async(
graph_id=graph_id,
user_id=api_key.user_id,
inputs=node_input,
graph_version=graph_version,
)
return {"id": graph_exec.id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
"""
Tool for the AskNews API.
To use this tool, you must first set your credentials as environment variables:
ASKNEWS_CLIENT_ID
ASKNEWS_CLIENT_SECRET
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.asknews import AskNewsAPIWrapper
class SearchInput(BaseModel):
"""Input for the AskNews Search tool."""
query: str = Field(
description="Search query to be used for finding real-time or historical news "
"information."
)
hours_back: Optional[int] = Field(
0,
description="If the Assistant deems that the event may have occurred more "
"than 48 hours ago, it estimates the number of hours back to search. For "
"example, if the event was one month ago, the Assistant may set this to 720. "
"One week would be 168. The Assistant can estimate up to on year back (8760).",
)
class AskNewsSearch(BaseTool):
"""Tool that searches the AskNews API."""
name: str = "asknews_search"
description: str = (
"This tool allows you to perform a search on up-to-date news and historical "
"news. If you needs news from more than 48 hours ago, you can estimate the "
"number of hours back to search."
)
api_wrapper: AskNewsAPIWrapper = Field(default_factory=AskNewsAPIWrapper)
max_results: int = 10
args_schema: Optional[Type[BaseModel]] = SearchInput
def _run(
self,
query: str,
hours_back: int = 0,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the tool."""
try:
return self.api_wrapper.search_news(
query,
hours_back=hours_back,
max_results=self.max_results,
)
except Exception as e:
return repr(e)
async def _arun(
self,
query: str,
hours_back: int = 0,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the tool asynchronously."""
try:
return await self.api_wrapper.asearch_news(
query,
hours_back=hours_back,
max_results=self.max_results,
)
except Exception as e:
return repr(e)
|
"""
Tool for the AskNews API.
To use this tool, you must first set your credentials as environment variables:
ASKNEWS_CLIENT_ID
ASKNEWS_CLIENT_SECRET
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.asknews import AskNewsAPIWrapper
class SearchInput(BaseModel):
"""Input for the AskNews Search tool."""
query: str = Field(
description="Search query to be used for finding real-time or historical news "
"information."
)
hours_back: Optional[int] = Field(
0,
description="If the Assistant deems that the event may have occurred more "
"than 48 hours ago, it estimates the number of hours back to search. For "
"example, if the event was one month ago, the Assistant may set this to 720. "
"One week would be 168. The Assistant can estimate up to on year back (8760).",
)
class AskNewsSearch(BaseTool): # type: ignore[override]
"""Tool that searches the AskNews API."""
name: str = "asknews_search"
description: str = (
"This tool allows you to perform a search on up-to-date news and historical "
"news. If you needs news from more than 48 hours ago, you can estimate the "
"number of hours back to search."
)
api_wrapper: AskNewsAPIWrapper = Field(default_factory=AskNewsAPIWrapper) # type: ignore[arg-type]
max_results: int = 10
args_schema: Optional[Type[BaseModel]] = SearchInput
def _run(
self,
query: str,
hours_back: int = 0,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the tool."""
try:
return self.api_wrapper.search_news(
query,
hours_back=hours_back,
max_results=self.max_results,
)
except Exception as e:
return repr(e)
async def _arun(
self,
query: str,
hours_back: int = 0,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the tool asynchronously."""
try:
return await self.api_wrapper.asearch_news(
query,
hours_back=hours_back,
max_results=self.max_results,
)
except Exception as e:
return repr(e)
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Query Sparsity: Active Dimensions: 74.9, Sparsity Ratio: 0.9975
Model Corpus Sparsity: Active Dimensions: 174.8, Sparsity Ratio: 0.9943
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Sparsity Stats Query : Row Non-Zero Mean: 74.93406589214618, Row Sparsity Mean: 0.9975449305314285
Model Sparsity Stats Corpus : Row Non-Zero Mean: 174.8070262028621, Row Sparsity Mean: 0.9942727547425491
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
import logging
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: guard your code with if __name__ == "__main__". Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Create a large list of 100k sentences
sentences = [f"This is sentence {i}" for i in range(100000)]
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
emb = model.encode_multi_process(sentences, pool)
print("Embeddings computed. Shape:", emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
import logging
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: guard your code with if __name__ == "__main__". Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Create a large list of 100k sentences
sentences = ["This is sentence {}".format(i) for i in range(100000)]
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
emb = model.encode_multi_process(sentences, pool)
print("Embeddings computed. Shape:", emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
import os
import random
import time
from collections import OrderedDict
from typing import Dict
import numpy as np
import pytest
from jina import Document, Flow, DocumentArray, requests, Executor
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.searcher.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 100
class TagMatchMerger(Executor):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
if docs_matrix:
# noinspection PyTypeHints
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
top_k = parameters.get('top_k')
if top_k:
top_k = int(top_k)
for doc in results.values():
doc.matches = sorted(
doc.matches,
key=lambda m: m.scores['l2'].value,
reverse=True,
)[:top_k]
docs = DocumentArray(list(results.values()))
return docs
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class FaissTaggingFileSearcher(FaissLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim]).astype(dtype=np.float32)
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
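    # Every shard should contribute at least one match, and the matches returned
    # across all shards should add up to TOP_K.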
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
@pytest.mark.parametrize('num_shards', (2, 3, 7))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
|
import os
import random
import time
from typing import Dict
import numpy as np
import pytest
from jina import Document, Flow, DocumentArray, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.searcher.compound.NumpyLMDBSearcher.npfile import NumpyLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
from tests.integration.psql_dump_reload.test_dump_psql import (
MatchMerger,
)
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 30
class TagMatchMerger(MatchMerger):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
MatchMerger.merge(
self, docs_matrix=docs_matrix, parameters=parameters, **kwargs
)
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class NumpyTaggingFileSearcher(NumpyLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim])
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
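    # Every shard should contribute at least one match, and the matches returned
    # across all shards should add up to TOP_K.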
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
# TODO: add num_shards=7
@pytest.mark.parametrize('num_shards', (2, 3))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
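# Note (added for clarity, not part of the original test): `dump_docs` writes one
# sub-folder per shard containing `ids`, `vectors` and `metas`; `flow.rolling_update`
# then points each shard's searcher at its own sub-folder. Because every match is
# tagged with the shard's `pea_id`, `validate_diff_sources` can require that the
# TOP_K results come from exactly `num_shards` distinct origins.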
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import Gemma2Model, GemmaTokenizer
from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel
from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
class SanaLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = SanaPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler(shift=7.0)
scheduler_kwargs = {}
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
transformer_kwargs = {
"patch_size": 1,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_cross_attention_heads": 2,
"cross_attention_head_dim": 4,
"cross_attention_dim": 8,
"caption_channels": 8,
"sample_size": 32,
}
transformer_cls = SanaTransformer2DModel
vae_kwargs = {
"in_channels": 3,
"latent_channels": 4,
"attention_head_dim": 2,
"encoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"decoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"encoder_block_out_channels": (8, 8),
"decoder_block_out_channels": (8, 8),
"encoder_qkv_multiscales": ((), (5,)),
"decoder_qkv_multiscales": ((), (5,)),
"encoder_layers_per_block": (1, 1),
"decoder_layers_per_block": [1, 1],
"downsample_block_type": "conv",
"upsample_block_type": "interpolate",
"decoder_norm_types": "rms_norm",
"decoder_act_fns": "silu",
"scaling_factor": 0.41407,
}
vae_cls = AutoencoderDC
tokenizer_cls, tokenizer_id = GemmaTokenizer, "hf-internal-testing/dummy-gemma"
text_encoder_cls, text_encoder_id = Gemma2Model, "hf-internal-testing/dummy-gemma-for-diffusers"
@property
def output_shape(self):
return (1, 32, 32, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
sizes = (32, 32)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "",
"negative_prompt": "",
"num_inference_steps": 4,
"guidance_scale": 4.5,
"height": 32,
"width": 32,
"max_sequence_length": sequence_length,
"output_type": "np",
"complex_human_instruction": None,
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
@unittest.skip("Not supported in SANA.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Not supported in SANA.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in SANA.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import Gemma2ForCausalLM, GemmaTokenizer
from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel
from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
class SanaLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = SanaPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler(shift=7.0)
scheduler_kwargs = {}
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
transformer_kwargs = {
"patch_size": 1,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_cross_attention_heads": 2,
"cross_attention_head_dim": 4,
"cross_attention_dim": 8,
"caption_channels": 8,
"sample_size": 32,
}
transformer_cls = SanaTransformer2DModel
vae_kwargs = {
"in_channels": 3,
"latent_channels": 4,
"attention_head_dim": 2,
"encoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"decoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"encoder_block_out_channels": (8, 8),
"decoder_block_out_channels": (8, 8),
"encoder_qkv_multiscales": ((), (5,)),
"decoder_qkv_multiscales": ((), (5,)),
"encoder_layers_per_block": (1, 1),
"decoder_layers_per_block": [1, 1],
"downsample_block_type": "conv",
"upsample_block_type": "interpolate",
"decoder_norm_types": "rms_norm",
"decoder_act_fns": "silu",
"scaling_factor": 0.41407,
}
vae_cls = AutoencoderDC
tokenizer_cls, tokenizer_id = GemmaTokenizer, "hf-internal-testing/dummy-gemma"
text_encoder_cls, text_encoder_id = Gemma2ForCausalLM, "hf-internal-testing/dummy-gemma-for-diffusers"
@property
def output_shape(self):
return (1, 32, 32, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
sizes = (32, 32)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "",
"negative_prompt": "",
"num_inference_steps": 4,
"guidance_scale": 4.5,
"height": 32,
"width": 32,
"max_sequence_length": sequence_length,
"output_type": "np",
"complex_human_instruction": None,
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
@unittest.skip("Not supported in Sana.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Not supported in Mochi.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in Mochi.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Mochi.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
Expectations,
require_read_token,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester
if is_torch_available():
import torch
from transformers import (
HeliumForCausalLM,
HeliumForSequenceClassification,
HeliumForTokenClassification,
HeliumModel,
)
class HeliumModelTester(GemmaModelTester):
if is_torch_available():
config_class = HeliumConfig
model_class = HeliumModel
for_causal_lm_class = HeliumForCausalLM
for_sequence_class = HeliumForSequenceClassification
for_token_class = HeliumForTokenClassification
@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
all_model_classes = (
(HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": HeliumModel,
"text-classification": HeliumForSequenceClassification,
"token-classification": HeliumForTokenClassification,
"text-generation": HeliumForCausalLM,
"zero-shot": HeliumForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = HeliumModelTester(self)
self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)
@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
input_text = ["Hello, today is a great day to"]
@require_read_token
def test_model_2b(self):
model_id = "kyutai/helium-1-preview"
expected_texts = Expectations(
{
("rocm", (9, 5)): ["Hello, today is a great day to start a new project. I have been working on a new project for a while now, and I"],
("cuda", None): ["Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"],
}
) # fmt: skip
EXPECTED_TEXTS = expected_texts.get_expectation()
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, revision="refs/pr/1").to(
torch_device
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
require_read_token,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester
if is_torch_available():
import torch
from transformers import (
HeliumForCausalLM,
HeliumForSequenceClassification,
HeliumForTokenClassification,
HeliumModel,
)
class HeliumModelTester(GemmaModelTester):
if is_torch_available():
config_class = HeliumConfig
model_class = HeliumModel
for_causal_lm_class = HeliumForCausalLM
for_sequence_class = HeliumForSequenceClassification
for_token_class = HeliumForTokenClassification
@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
all_model_classes = (
(HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": HeliumModel,
"text-classification": HeliumForSequenceClassification,
"token-classification": HeliumForTokenClassification,
"text-generation": HeliumForCausalLM,
"zero-shot": HeliumForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = HeliumModelTester(self)
self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)
@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
input_text = ["Hello, today is a great day to"]
@require_read_token
def test_model_2b(self):
model_id = "kyutai/helium-1-preview"
EXPECTED_TEXTS = [
"Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"
]
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, revision="refs/pr/1").to(
torch_device
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for *LJSpeech-1.1* :cite:`ljspeech17`.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str):
``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
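# Usage sketch (illustrative addition, not part of the torchaudio module). It follows
# the access pattern documented above; the "./data" root is an assumed local path.
if __name__ == "__main__":
    dataset = LJSPEECH(root="./data", download=True)
    waveform, sample_rate, transcript, normalized_transcript = dataset[0]
    print(f"{sample_rate} Hz, {waveform.shape[-1]} samples: {normalized_transcript}")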
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for *LJSpeech-1.1* [:footcite:`ljspeech17`].
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str):
``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
|
_base_ = '../faster_rcnn/faster-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='PISARoIHead',
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
train_cfg=dict(
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
sampler=dict(
type='ScoreHLRSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2, bias=0),
carl=dict(k=1, bias=0.2))),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
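# Illustrative note (not part of the config): files like this are merged with their
# `_base_` config before use. A minimal loading sketch, assuming this file's path:
#
#   from mmengine.config import Config
#   cfg = Config.fromfile('configs/pisa/pisa-faster-rcnn_x101-32x4d_fpn_1x_coco.py')
#   print(cfg.model.roi_head.type)  # 'PISARoIHead' overrides the base Faster R-CNN head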
|
_base_ = '../faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='PISARoIHead',
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
train_cfg=dict(
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
sampler=dict(
type='ScoreHLRSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2, bias=0),
carl=dict(k=1, bias=0.2))),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`stride=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
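# Illustrative check (added for clarity, not part of the layer implementation):
# verifies the output-size formulas quoted in the docstring on the small inputs used
# in the examples above.
import math


def _pooled_size(input_size, pool_size, strides, padding):
    if padding == "valid":
        return math.floor((input_size - pool_size) / strides) + 1
    return math.floor((input_size - 1) / strides) + 1  # "same"


assert _pooled_size(3, 2, 1, "valid") == 2  # 3x3 input, strides=(1, 1), "valid"
assert _pooled_size(3, 2, 1, "same") == 3   # 3x3 input, strides=(1, 1), "same"
assert _pooled_size(4, 2, 2, "valid") == 2  # width of the 3x4 input, strides=(2, 2)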
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`stride=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.4.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
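# Illustrative sketch (not part of the package): `digit_version` turns a version
# string into a comparable tuple, which is what makes the range asserts above work.
# A simplified stand-in that only handles plain "x.y.z" strings (the real helper
# also understands rc/dev/post segments such as '2.0.0rc4'):
def _toy_digit_version(version: str) -> tuple:
    return tuple(int(part) for part in version.split('.'))


assert _toy_digit_version('2.0.5') >= _toy_digit_version('2.0.0')
assert _toy_digit_version('2.0.5') < _toy_digit_version('2.1.0')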
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.3.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking import GrpcConnectionPool
from jina_cli.api import executor_native, gateway
from tests.helper import _generate_pod_args
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = _generate_pod_args()
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
GrpcConnectionPool.send_request_sync(
_create_test_data_message(), target=f'{args.host}:{args.port}'
)
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
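# Illustrative sketch (not part of the original tests): the generic pattern these
# tests exercise, reduced to plain Python. A child process installs a SIGTERM handler
# that flushes state to disk before exiting; the test then kills it and inspects the
# file. All names below are invented for demonstration.
def _graceful_worker(path):
    import signal as _signal
    import sys as _sys
    import time as _time

    def _on_term(signum, frame):
        with open(path, 'w') as fp:
            fp.write('proper close')
        _sys.exit(0)

    _signal.signal(_signal.SIGTERM, _on_term)
    while True:
        _time.sleep(0.1)


# Typical check: start `_graceful_worker` in a `multiprocessing.Process`, send
# `os.kill(process.pid, signal.SIGTERM)`, `join()` it, then assert the file exists.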
|
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.networking import GrpcConnectionPool
from jina_cli.api import executor_native, gateway
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = set_pod_parser().parse_args([])
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
GrpcConnectionPool.send_request_sync(
_create_test_data_message(), target=f'{args.host}:{args.port}'
)
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2
from keras.src.applications.resnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet_v2 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet_v2 import ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2
from keras.src.applications.resnet_v2 import decode_predictions
from keras.src.applications.resnet_v2 import preprocess_input
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...FloatDTypePolicy'>
You can also specify `config` of the dtype policy to this function by
passing dict containing `class_name` and `config` as an identifier. Also
note that the `class_name` must map to a `DTypePolicy` class
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...FloatDTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None` or string name of a
`DTypePolicy` or `DTypePolicy` configuration dictionary or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, (FloatDTypePolicy, QuantizedDTypePolicy)):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
except:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
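# Illustrative sketch (not part of the module): a serialize/deserialize round trip
# through the helpers defined above, assuming the "mixed_bfloat16" policy name.
if __name__ == "__main__":
    policy = get("mixed_bfloat16")
    config = serialize(policy)
    restored = deserialize(config)
    assert restored.name == policy.name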
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(loss)
<class '...FloatDTypePolicy'>
You can also specify `config` of the dtype policy to this function by
passing dict containing `class_name` and `config` as an identifier. Also
note that the `class_name` must map to a `DTypePolicy` class
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(loss)
<class '...FloatDTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None` or string name of a
`DTypePolicy` or `DTypePolicy` configuration dictionary or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, (FloatDTypePolicy, QuantizedDTypePolicy)):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
except:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
|
"""
Example of training with Dask on GPU
====================================
"""
import dask
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
# DaskDMatrix acts like normal DMatrix, works as a proxy for local DMatrix scatter
# around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This distributed version
# of train returns a dictionary containing the resulting booster and evaluation
# history obtained from evaluation metrics.
output = dxgb.train(
client,
# Make sure the device is set to CUDA.
{"tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
reducing memory usage.
.. versionadded:: 1.2.0
"""
# `DaskQuantileDMatrix` is used instead of `DaskDMatrix`, be careful that it can not
# be used for anything else other than training unless a reference is specified. See
# the `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
# Make sure the device is set to CUDA.
{"tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
prediction = dxgb.predict(client, output, X)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
# Create client from cluster, set the backend to GPU array (cupy).
with Client(cluster) as client, dask.config.set({"array.backend": "cupy"}):
# Generate some random data for demonstration
rng = da.random.default_rng(1)
m = 2**18
n = 100
X = rng.uniform(size=(m, n), chunks=(128**2, -1))
y = X.sum(axis=1)
X = dd.from_dask_array(X)
y = dd.from_dask_array(y)
# XGBoost can take arrays. This is to show that DataFrame uses the GPU
# backend as well.
assert isinstance(X, dask_cudf.DataFrame)
assert isinstance(y, dask_cudf.Series)
print("Using DaskQuantileDMatrix")
from_ddqdm = using_quantile_device_dmatrix(client, X, y).compute()
print("Using DMatrix")
from_dmatrix = using_dask_matrix(client, X, y).compute()
|
"""
Example of training with Dask on GPU
====================================
"""
import dask
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
# DaskDMatrix acts like normal DMatrix, works as a proxy for local DMatrix scatter
# around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This distributed version
# of train returns a dictionary containing the resulting booster and evaluation
# history obtained from evaluation metrics.
output = dxgb.train(
client,
# Make sure the device is set to CUDA.
{"tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
reducing memory usage.
.. versionadded:: 1.2.0
"""
# `DaskQuantileDMatrix` is used instead of `DaskDMatrix`, be careful that it can not
# be used for anything else other than training unless a reference is specified. See
# the `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
# Make sure the device is set to CUDA.
{"tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
prediction = dxgb.predict(client, output, X)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
# Create client from cluster, set the backend to GPU array (cupy).
with Client(cluster) as client, dask.config.set({"array.backend": "cupy"}):
# Generate some random data for demonstration
rng = da.random.default_rng(1)
m = 2**18
n = 100
X = rng.uniform(size=(m, n), chunks=(128**2, -1))
y = X.sum(axis=1)
X = dd.from_dask_array(X)
y = dd.from_dask_array(y)
# XGBoost can take arrays. This is to show that DataFrame uses the GPU
# backend as well.
assert isinstance(X, dask_cudf.DataFrame)
assert isinstance(y, dask_cudf.Series)
print("Using DaskQuantileDMatrix")
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
print("Using DMatrix")
from_dmatrix = using_dask_matrix(client, X, y)
|
from . import ffmpeg_utils, sox_utils
from .download import download_asset
__all__ = [
"download_asset",
"sox_utils",
"ffmpeg_utils",
]
|
from torchaudio._internal import module_utils as _mod_utils
from . import ffmpeg_utils, sox_utils
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(0)
__all__ = [
"download_asset",
"sox_utils",
"ffmpeg_utils",
]
|
from keras.src import testing
from keras.src.datasets import boston_housing
class BostonHousingTest(testing.TestCase):
def test_load_data(self):
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
self.assertEqual(x_train.shape[1], 13)
self.assertEqual(x_train.shape[0] + x_test.shape[0], 506)
def test_seed_reproducibility(self):
seed = 123
first_load = boston_housing.load_data(seed=seed)
second_load = boston_housing.load_data(seed=seed)
self.assertAllClose(first_load[0][0], second_load[0][0])
self.assertAllClose(first_load[1][0], second_load[1][0])
def test_invalid_test_split(self):
with self.assertRaises(AssertionError):
boston_housing.load_data(test_split=-0.1)
with self.assertRaises(AssertionError):
boston_housing.load_data(test_split=1.0)
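# Illustrative usage sketch (not part of the test suite): the call being exercised
# above, with the keyword arguments the tests rely on.
#
#   (x_train, y_train), (x_test, y_test) = boston_housing.load_data(
#       test_split=0.2, seed=113
#   )
#   assert x_train.shape[1] == 13  # 13 input features per sample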
|
from keras.src import testing
from keras.src.datasets import boston_housing
class BostonHousingTest(testing.TestCase):
def test_load_data(self):
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
self.assertEqual(x_train.shape[1], 13)
self.assertEqual(x_train.shape[0] + x_test.shape[0], 506)
def test_seed_reproducibility(self):
seed = 123
first_load = boston_housing.load_data(seed=seed)
second_load = boston_housing.load_data(seed=seed)
self.assertAllClose(first_load[0][0], second_load[0][0])
self.assertAllClose(first_load[1][0], second_load[1][0])
def test_invalid_test_split(self):
with self.assertRaises(AssertionError):
boston_housing.load_data(test_split=-0.1)
with self.assertRaises(AssertionError):
boston_housing.load_data(test_split=1.0)
|
from __future__ import annotations
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
]
|
from __future__ import annotations
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inherited from ``BaseLoop`` should overwrite the
:meth:`run` method.
Args:
runner (Runner): A reference of runner.
dataloader (Dataloader or dict): An iterator to generate one batch of
dataset each iteration.
"""
def __init__(self, runner, dataloader: Union[DataLoader, Dict]) -> None:
self._runner = runner
if isinstance(dataloader, dict):
# Determine whether or not different ranks use different seed.
diff_rank_seed = runner._randomness_cfg.get(
'diff_rank_seed', False)
self.dataloader = runner.build_dataloader(
dataloader, seed=runner.seed, diff_rank_seed=diff_rank_seed)
else:
self.dataloader = dataloader
@property
def runner(self):
return self._runner
@abstractmethod
def run(self) -> Any:
"""Execute loop."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inherited from ``BaseLoop`` should overwrite the
:meth:`run` method.
Args:
runner (Runner): A reference of runner.
dataloader (Dataloader or dict): An iterator to generate one batch of
dataset each iteration.
"""
def __init__(self, runner, dataloader: Union[DataLoader, Dict]) -> None:
self._runner = runner
if isinstance(dataloader, dict):
# Determine whether or not different ranks use different seed.
diff_rank_seed = runner._randomness_cfg.get(
'diff_rank_seed', False)
self.dataloader = runner.build_dataloader(
dataloader, seed=runner.seed, diff_rank_seed=diff_rank_seed)
else:
self.dataloader = dataloader
@property
def runner(self):
return self._runner
@abstractmethod
def run(self) -> None:
"""Execute loop."""
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from unittest.mock import patch
import transformers.commands.transformers_cli as cli
from transformers.commands.chat import ChatArguments, ChatCommand
from transformers.testing_utils import CaptureStd
class ChatCLITest(unittest.TestCase):
def test_help(self):
with patch("sys.argv", ["transformers", "chat", "--help"]), CaptureStd() as cs:
with self.assertRaises(SystemExit):
cli.main()
self.assertIn("chat interface", cs.out.lower())
@patch.object(ChatCommand, "run")
def test_cli_dispatch(self, run_mock):
args = ["transformers", "chat", "hf-internal-testing/tiny-random-gpt2"]
with patch("sys.argv", args):
cli.main()
run_mock.assert_called_once()
def test_parsed_args(self):
with (
patch.object(ChatCommand, "__init__", return_value=None) as init_mock,
patch.object(ChatCommand, "run") as run_mock,
patch(
"sys.argv",
[
"transformers",
"chat",
"test-model",
"max_new_tokens=64",
],
),
):
cli.main()
init_mock.assert_called_once()
run_mock.assert_called_once()
parsed_args = init_mock.call_args[0][0]
self.assertEqual(parsed_args.model_name_or_path_or_address, "test-model")
self.assertEqual(parsed_args.generate_flags, ["max_new_tokens=64"])
class ChatUtilitiesTest(unittest.TestCase):
def test_save_and_clear_chat(self):
tmp_path = tempfile.mkdtemp()
args = ChatArguments(save_folder=str(tmp_path))
args.model_name_or_path_or_address = "test-model"
chat_history = [{"role": "user", "content": "hi"}]
filename = ChatCommand.save_chat(chat_history, args)
self.assertTrue(os.path.isfile(filename))
cleared = ChatCommand.clear_chat_history()
self.assertEqual(cleared, [])
def test_parse_generate_flags(self):
dummy = ChatCommand.__new__(ChatCommand)
parsed = ChatCommand.parse_generate_flags(dummy, ["temperature=0.5", "max_new_tokens=10"])
self.assertEqual(parsed["temperature"], 0.5)
self.assertEqual(parsed["max_new_tokens"], 10)
|
import os
import tempfile
import unittest
from unittest.mock import patch
import transformers.commands.transformers_cli as cli
from transformers.commands.chat import ChatArguments, ChatCommand
from transformers.testing_utils import CaptureStd
class ChatCLITest(unittest.TestCase):
def test_help(self):
with patch("sys.argv", ["transformers", "chat", "--help"]), CaptureStd() as cs:
with self.assertRaises(SystemExit):
cli.main()
self.assertIn("chat interface", cs.out.lower())
@patch.object(ChatCommand, "run")
def test_cli_dispatch(self, run_mock):
args = ["transformers", "chat", "hf-internal-testing/tiny-random-gpt2"]
with patch("sys.argv", args):
cli.main()
run_mock.assert_called_once()
def test_parsed_args(self):
with (
patch.object(ChatCommand, "__init__", return_value=None) as init_mock,
patch.object(ChatCommand, "run") as run_mock,
patch(
"sys.argv",
[
"transformers",
"chat",
"test-model",
"max_new_tokens=64",
],
),
):
cli.main()
init_mock.assert_called_once()
run_mock.assert_called_once()
parsed_args = init_mock.call_args[0][0]
self.assertEqual(parsed_args.model_name_or_path_or_address, "test-model")
self.assertEqual(parsed_args.generate_flags, ["max_new_tokens=64"])
class ChatUtilitiesTest(unittest.TestCase):
def test_save_and_clear_chat(self):
tmp_path = tempfile.mkdtemp()
args = ChatArguments(save_folder=str(tmp_path))
args.model_name_or_path_or_address = "test-model"
chat_history = [{"role": "user", "content": "hi"}]
filename = ChatCommand.save_chat(chat_history, args)
self.assertTrue(os.path.isfile(filename))
cleared = ChatCommand.clear_chat_history()
self.assertEqual(cleared, [])
def test_parse_generate_flags(self):
dummy = ChatCommand.__new__(ChatCommand)
parsed = ChatCommand.parse_generate_flags(dummy, ["temperature=0.5", "max_new_tokens=10"])
self.assertEqual(parsed["temperature"], 0.5)
self.assertEqual(parsed["max_new_tokens"], 10)
|
"""
Visual demo for survival analysis (regression) with Accelerated Failure Time (AFT) model.
=========================================================================================
This demo uses 1D toy data and visualizes how XGBoost fits a tree ensemble. The ensemble
model starts out as a flat line and evolves into a step function in order to account for
all ranged labels.
"""
import matplotlib.pyplot as plt
import numpy as np
import xgboost as xgb
plt.rcParams.update({"font.size": 13})
# Function to visualize censored labels
def plot_censored_labels(
X: np.ndarray, y_lower: np.ndarray, y_upper: np.ndarray
) -> None:
    def replace_inf(x: np.ndarray, target_value: float) -> np.ndarray:
        # Operate on a copy so the caller's y_lower/y_upper keep their +/-inf bounds for training.
        x = x.copy()
        x[np.isinf(x)] = target_value
        return x
plt.plot(X, y_lower, "o", label="y_lower", color="blue")
plt.plot(X, y_upper, "o", label="y_upper", color="fuchsia")
plt.vlines(
X,
ymin=replace_inf(y_lower, 0.01),
ymax=replace_inf(y_upper, 1000.0),
label="Range for y",
color="gray",
)
# Toy data
X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
INF = np.inf
y_lower = np.array([10, 15, -INF, 30, 100])
y_upper = np.array([INF, INF, 20, 50, INF])
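# Censoring encoded by the bounds: rows 1, 2 and 5 are right-censored (y_upper = +inf),
# row 3 is left-censored (y_lower = -inf), and row 4 is interval-censored ([30, 50]);
# an uncensored point would have y_lower == y_upper.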
# Visualize toy data
plt.figure(figsize=(5, 4))
plot_censored_labels(X, y_lower, y_upper)
plt.ylim((6, 200))
plt.legend(loc="lower right")
plt.title("Toy data")
plt.xlabel("Input feature")
plt.ylabel("Label")
plt.yscale("log")
plt.tight_layout()
plt.show(block=True)
# Will be used to visualize XGBoost model
grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1))
# Train AFT model using XGBoost
dmat = xgb.DMatrix(X)
dmat.set_float_info("label_lower_bound", y_lower)
dmat.set_float_info("label_upper_bound", y_upper)
params = {"max_depth": 3, "objective": "survival:aft", "min_child_weight": 0}
accuracy_history = []
class PlotIntermediateModel(xgb.callback.TrainingCallback):
"""Custom callback to plot intermediate models."""
def __init__(self) -> None:
super().__init__()
def after_iteration(
self,
model: xgb.Booster,
epoch: int,
evals_log: xgb.callback.TrainingCallback.EvalsLog,
) -> bool:
"""Run after training is finished."""
# Compute y_pred = prediction using the intermediate model, at current boosting
# iteration
y_pred = model.predict(dmat)
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper)
# includes the corresponding predicted label (y_pred)
acc = np.sum(
np.logical_and(y_pred >= y_lower, y_pred <= y_upper) / len(X) * 100
)
accuracy_history.append(acc)
# Plot ranged labels as well as predictions by the model
plt.subplot(5, 3, epoch + 1)
plot_censored_labels(X, y_lower, y_upper)
y_pred_grid_pts = model.predict(xgb.DMatrix(grid_pts))
plt.plot(
grid_pts, y_pred_grid_pts, "r-", label="XGBoost AFT model", linewidth=4
)
plt.title("Iteration {}".format(epoch), x=0.5, y=0.8)
plt.xlim((0.8, 5.2))
plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
plt.yscale("log")
return False
res: xgb.callback.TrainingCallback.EvalsLog = {}
plt.figure(figsize=(12, 13))
bst = xgb.train(
params,
dmat,
15,
[(dmat, "train")],
evals_result=res,
callbacks=[PlotIntermediateModel()],
)
plt.tight_layout()
plt.legend(
loc="lower center",
ncol=4,
bbox_to_anchor=(0.5, 0),
bbox_transform=plt.gcf().transFigure,
)
plt.tight_layout()
# Plot negative log likelihood over boosting iterations
plt.figure(figsize=(8, 3))
plt.subplot(1, 2, 1)
plt.plot(res["train"]["aft-nloglik"], "b-o", label="aft-nloglik")
plt.xlabel("# Boosting Iterations")
plt.legend(loc="best")
# Plot "accuracy" over boosting iterations
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
# the corresponding predicted label (y_pred)
plt.subplot(1, 2, 2)
plt.plot(accuracy_history, "r-o", label="Accuracy (%)")
plt.xlabel("# Boosting Iterations")
plt.legend(loc="best")
plt.tight_layout()
plt.show()
|
"""
Visual demo for survival analysis (regression) with Accelerated Failure Time (AFT) model.
=========================================================================================
This demo uses 1D toy data and visualizes how XGBoost fits a tree ensemble. The ensemble
model starts out as a flat line and evolves into a step function in order to account for
all ranged labels.
"""
import matplotlib.pyplot as plt
import numpy as np
import xgboost as xgb
plt.rcParams.update({"font.size": 13})
# Function to visualize censored labels
def plot_censored_labels(
X: np.ndarray, y_lower: np.ndarray, y_upper: np.ndarray
) -> None:
def replace_inf(x: np.ndarray, target_value: float) -> np.ndarray:
x[np.isinf(x)] = target_value
return x
plt.plot(X, y_lower, "o", label="y_lower", color="blue")
plt.plot(X, y_upper, "o", label="y_upper", color="fuchsia")
plt.vlines(
X,
ymin=replace_inf(y_lower, 0.01),
ymax=replace_inf(y_upper, 1000.0),
label="Range for y",
color="gray",
)
# Toy data
X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
INF = np.inf
y_lower = np.array([10, 15, -INF, 30, 100])
y_upper = np.array([INF, INF, 20, 50, INF])
# Visualize toy data
plt.figure(figsize=(5, 4))
plot_censored_labels(X, y_lower, y_upper)
plt.ylim((6, 200))
plt.legend(loc="lower right")
plt.title("Toy data")
plt.xlabel("Input feature")
plt.ylabel("Label")
plt.yscale("log")
plt.tight_layout()
plt.show(block=True)
# Will be used to visualize XGBoost model
grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1))
# Train AFT model using XGBoost
dmat = xgb.DMatrix(X)
dmat.set_float_info("label_lower_bound", y_lower)
dmat.set_float_info("label_upper_bound", y_upper)
params = {"max_depth": 3, "objective": "survival:aft", "min_child_weight": 0}
accuracy_history = []
class PlotIntermediateModel(xgb.callback.TrainingCallback):
"""Custom callback to plot intermediate models."""
def __init__(self) -> None:
super().__init__()
def after_iteration(
self,
model: xgb.Booster,
epoch: int,
evals_log: xgb.callback.TrainingCallback.EvalsLog,
) -> bool:
"""Run after training is finished."""
# Compute y_pred = prediction using the intermediate model, at current boosting
# iteration
y_pred = model.predict(dmat)
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper)
# includes the corresponding predicted label (y_pred)
acc = np.sum(
np.logical_and(y_pred >= y_lower, y_pred <= y_upper) / len(X) * 100
)
accuracy_history.append(acc)
# Plot ranged labels as well as predictions by the model
plt.subplot(5, 3, epoch + 1)
plot_censored_labels(X, y_lower, y_upper)
y_pred_grid_pts = model.predict(xgb.DMatrix(grid_pts))
plt.plot(
grid_pts, y_pred_grid_pts, "r-", label="XGBoost AFT model", linewidth=4
)
plt.title("Iteration {}".format(epoch), x=0.5, y=0.8)
plt.xlim((0.8, 5.2))
plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
plt.yscale("log")
return False
res: xgb.callback.TrainingCallback.EvalsLog = {}
plt.figure(figsize=(12, 13))
bst = xgb.train(
params,
dmat,
15,
[(dmat, "train")],
evals_result=res,
callbacks=[PlotIntermediateModel()],
)
plt.tight_layout()
plt.legend(
loc="lower center",
ncol=4,
bbox_to_anchor=(0.5, 0),
bbox_transform=plt.gcf().transFigure,
)
plt.tight_layout()
# Plot negative log likelihood over boosting iterations
plt.figure(figsize=(8, 3))
plt.subplot(1, 2, 1)
plt.plot(res["train"]["aft-nloglik"], "b-o", label="aft-nloglik")
plt.xlabel("# Boosting Iterations")
plt.legend(loc="best")
# Plot "accuracy" over boosting iterations
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
# the corresponding predicted label (y_pred)
plt.subplot(1, 2, 2)
plt.plot(accuracy_history, "r-o", label="Accuracy (%)")
plt.xlabel("# Boosting Iterations")
plt.legend(loc="best")
plt.tight_layout()
plt.show()
|
from typing import Any
from unittest.mock import patch
import asyncio
import pytest
from llama_index.core.base.llms.types import ChatResponse, ChatMessage, MessageRole
from llama_index.core.llms.mock import MockLLM
from llama_index.core.postprocessor.rankGPT_rerank import RankGPTRerank
from llama_index.core.schema import TextNode, NodeWithScore
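# The mocked LLM replies with a RankGPT-style ranking: a permutation of 1-based candidate
# indices ("[2] > [1] > [3]") that the reranker parses to reorder the input nodes.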
def mock_rankgpt_chat(self: Any, messages, **kwargs: Any) -> ChatResponse:
return ChatResponse(
message=ChatMessage(role=MessageRole.SYSTEM, content="[2] > [1] > [3]")
)
async def mock_rankgpt_achat(self, messages, **kwargs: Any) -> ChatResponse:
# Mock api call
await asyncio.sleep(1)
return ChatResponse(
message=ChatMessage(role=MessageRole.SYSTEM, content="[2] > [1] > [3]")
)
nodes = [
TextNode(text="Test"),
TextNode(text="Test2"),
TextNode(text="Test3"),
]
nodes_with_score = [NodeWithScore(node=n) for n in nodes]
@patch.object(
MockLLM,
"chat",
mock_rankgpt_chat,
)
def test_rankgpt_rerank():
rankgpt_rerank = RankGPTRerank(
top_n=2,
llm=MockLLM(),
)
result = rankgpt_rerank.postprocess_nodes(nodes_with_score, query_str="Test query")
assert len(result) == 2
assert result[0].node.get_content() == "Test2"
assert result[1].node.get_content() == "Test"
@patch.object(
MockLLM,
"achat",
mock_rankgpt_achat,
)
@pytest.mark.asyncio
async def test_rankgpt_rerank_async():
rankgpt_rerank = RankGPTRerank(
top_n=2,
llm=MockLLM(),
)
result = await rankgpt_rerank.apostprocess_nodes(
nodes_with_score, query_str="Test query"
)
assert len(result) == 2
assert result[0].node.get_content() == "Test2"
assert result[1].node.get_content() == "Test"
|
from typing import Any
from unittest.mock import patch
import asyncio
import pytest
from llama_index.core.base.llms.types import ChatResponse, ChatMessage, MessageRole
from llama_index.core.llms.mock import MockLLM
from llama_index.core.postprocessor.rankGPT_rerank import RankGPTRerank
from llama_index.core.schema import TextNode, NodeWithScore
def mock_rankgpt_chat(self: Any, messages, **kwargs: Any) -> ChatResponse:
return ChatResponse(
message=ChatMessage(role=MessageRole.SYSTEM, content="[2] > [1] > [3]")
)
async def mock_rankgpt_achat(self, messages, **kwargs: Any) -> ChatResponse:
# Mock api call
await asyncio.sleep(1)
return ChatResponse(
message=ChatMessage(role=MessageRole.SYSTEM, content="[2] > [1] > [3]")
)
nodes = [
TextNode(text="Test"),
TextNode(text="Test2"),
TextNode(text="Test3"),
]
nodes_with_score = [NodeWithScore(node=n) for n in nodes]
@patch.object(
MockLLM,
"chat",
mock_rankgpt_chat,
)
def test_rankgpt_rerank():
rankgpt_rerank = RankGPTRerank(
top_n=2,
llm=MockLLM(),
)
result = rankgpt_rerank.postprocess_nodes(nodes_with_score, query_str="Test query")
assert len(result) == 2
assert result[0].node.get_content() == "Test2"
assert result[1].node.get_content() == "Test"
@patch.object(
MockLLM,
"achat",
mock_rankgpt_achat,
)
@pytest.mark.asyncio()
async def test_rankgpt_rerank_async():
rankgpt_rerank = RankGPTRerank(
top_n=2,
llm=MockLLM(),
)
result = await rankgpt_rerank.apostprocess_nodes(
nodes_with_score, query_str="Test query"
)
assert len(result) == 2
assert result[0].node.get_content() == "Test2"
assert result[1].node.get_content() == "Test"
|
import contextlib
import os
import shutil
import threading
import time
from jina import Client, Deployment, DocumentArray, Flow
cur_dir = os.path.dirname(__file__)
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
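    """Temporarily replace `output_file_path` with `input_file_path`, restoring the backup on exit.
    The sleeps give the reload watcher time to pick up each change."""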
backup_file = os.path.join(temp_path, 'backup.yaml')
try:
shutil.copy2(output_file_path, backup_file)
shutil.copy(input_file_path, output_file_path)
time.sleep(2.0)
yield
finally:
shutil.copy2(backup_file, output_file_path)
time.sleep(5.0)
def flow_run(flow, stop_event):
with flow:
flow.block(stop_event)
def deployment_run(depl, stop_event):
with depl:
depl.block(stop_event)
def test_flow_reload(tmpdir):
stop_event = threading.Event()
flow = Flow().add(
uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), reload=True
)
t = threading.Thread(target=flow_run, args=(flow, stop_event))
t.start()
time.sleep(5)
try:
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(os.path.join(cur_dir, 'exec'), 'config_alt.yml'),
os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'),
str(tmpdir),
):
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
finally:
stop_event.set()
t.join()
def test_deployment_reload(tmpdir):
stop_event = threading.Event()
depl = Deployment(
uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), reload=True
)
t = threading.Thread(target=deployment_run, args=(depl, stop_event))
t.start()
time.sleep(5)
try:
res = depl.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(os.path.join(cur_dir, 'exec'), 'config_alt.yml'),
os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'),
str(tmpdir),
):
res = depl.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = depl.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
finally:
stop_event.set()
t.join()
|
import contextlib
import os
import shutil
import threading
import time
from jina import Client, Deployment, DocumentArray, Flow
cur_dir = os.path.dirname(__file__)
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.yaml')
try:
shutil.copy2(output_file_path, backup_file)
shutil.copy(input_file_path, output_file_path)
time.sleep(2.0)
yield
finally:
shutil.copy2(backup_file, output_file_path)
time.sleep(5.0)
def flow_run(flow, stop_event):
with flow:
flow.block(stop_event)
def deployment_run(depl, stop_event):
with depl:
depl.block(stop_event)
def test_flow_reload(tmpdir):
stop_event = threading.Event()
flow = Flow().add(
uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), reload=True
)
t = threading.Thread(target=flow_run, args=(flow, stop_event))
t.start()
time.sleep(5)
try:
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(os.path.join(cur_dir, 'exec'), 'config_alt.yml'),
os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'),
str(tmpdir),
):
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
finally:
stop_event.set()
t.join()
def test_deployment_reload(tmpdir):
stop_event = threading.Event()
depl = Deployment(
uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), reload=True
)
t = threading.Thread(target=deployment_run, args=(depl, stop_event))
t.start()
time.sleep(5)
try:
client = Client(port=depl.port, protocol=str(depl.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(os.path.join(cur_dir, 'exec'), 'config_alt.yml'),
os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'),
str(tmpdir),
):
client = Client(port=depl.port, protocol=str(depl.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
client = Client(port=depl.port, protocol=str(depl.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
finally:
stop_event.set()
t.join()
|
"""
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train", trust_remote_code=True)
corpus = dataset["answer"]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
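# On the first loop iteration corpus_index is None, so the search helper is handed the precomputed
# corpus embeddings and (because output_index=True) returns the index it builds; later iterations
# reuse that index instead of re-indexing the corpus.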
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
    # 7. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g. via `pip install elasticsearch`.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train", trust_remote_code=True).map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
sparse_model = SparseEncoder("sparse-embedding/splade_example")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
    # 7. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=10,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
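# The base configs above are merged in order; a derived config can override any inherited key.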
|
_base_ = [
'../_base_/models/mask_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
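# (caffe-style pretrained backbones expect BGR input with per-channel mean subtraction only,
# hence std=[1.0, 1.0, 1.0] and to_rgb=False)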
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
model_split_percents = [0.7, 0.7, 0.8]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": None,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class CogVideoX1_5TransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"use_rotary_positional_embeddings": True,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": None,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class CogVideoX1_5TransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"use_rotary_positional_embeddings": True,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|