input (string, lengths 33–5k) | output (string, lengths 32–5k)
---|---
"""Generate migrations for partner packages."""
import importlib
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langchain_cli.namespaces.migrate.generate.utils import (
COMMUNITY_PKG,
find_subclasses_in_module,
list_classes_by_package,
list_init_imports_by_package,
)
# PUBLIC API
def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
"""Generate migrations from community package to partner package.
This code works by matching class names exported from the community package against classes defined in the partner package.
Args:
pkg_name (str): The name of the partner package.
Returns:
List of 2-tuples containing old and new import paths.
"""
package = importlib.import_module(pkg_name)
classes_ = find_subclasses_in_module(
package,
[
BaseLanguageModel,
Embeddings,
BaseRetriever,
VectorStore,
BaseDocumentTransformer,
BaseDocumentCompressor,
],
)
community_classes = list_classes_by_package(str(COMMUNITY_PKG))
imports_for_pkg = list_init_imports_by_package(str(COMMUNITY_PKG))
old_paths = community_classes + imports_for_pkg
return [
(f"{module}.{item}", f"{pkg_name}.{item}")
for module, item in old_paths
if item in classes_
]
|
"""Generate migrations for partner packages."""
import importlib
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langchain_cli.namespaces.migrate.generate.utils import (
COMMUNITY_PKG,
find_subclasses_in_module,
list_classes_by_package,
list_init_imports_by_package,
)
# PUBLIC API
def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
"""Generate migrations from community package to partner package.
This code works by matching class names exported from the community package against classes defined in the partner package.
Args:
pkg_name (str): The name of the partner package.
Returns:
List of 2-tuples containing old and new import paths.
"""
package = importlib.import_module(pkg_name)
classes_ = find_subclasses_in_module(
package,
[
BaseLanguageModel,
Embeddings,
BaseRetriever,
VectorStore,
BaseDocumentTransformer,
BaseDocumentCompressor,
],
)
community_classes = list_classes_by_package(str(COMMUNITY_PKG))
imports_for_pkg = list_init_imports_by_package(str(COMMUNITY_PKG))
old_paths = community_classes + imports_for_pkg
return [
(f"{module}.{item}", f"{pkg_name}.{item}")
for module, item in old_paths
if item in classes_
]
|
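A minimal usage sketch of the generator above. The partner package name and the import path of this module are assumptions, not given by the source:

```python
# Hedged sketch: the module path and the partner package name are illustrative.
from langchain_cli.namespaces.migrate.generate.partner import (
    get_migrations_for_partner_package,
)

for old_path, new_path in get_migrations_for_partner_package("langchain_openai"):
    print(f"{old_path} -> {new_path}")
```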
import numpy as np
import torch
from docarray import Document
from docarray.typing import AnyTensor, NdArray, TorchTensor
def test_set_tensor():
class MyDocument(Document):
tensor: AnyTensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
|
import numpy as np
import torch
from docarray import Document
from docarray.typing import NdArray, Tensor, TorchTensor
def test_set_tensor():
class MyDocument(Document):
tensor: Tensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
|
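For context, a short sketch of the same coercion outside a test, assuming the docarray version used above; the `ImageDoc` name is illustrative:

```python
import numpy as np
from docarray import Document
from docarray.typing import NdArray

class ImageDoc(Document):
    tensor: NdArray  # field typed with the concrete ndarray wrapper

doc = ImageDoc(tensor=np.zeros((3, 224, 224)))
# The raw numpy array is coerced into docarray's NdArray subclass.
assert isinstance(doc.tensor, NdArray) and isinstance(doc.tensor, np.ndarray)
```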
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, PatchMerging, Transformer,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerDecoder', 'Transformer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, PatchMerging, Transformer,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerDecoder', 'Transformer', 'PatchMerging',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'InvertedResidual', 'SELayer',
'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d', 'AdaptiveAvgPool2d',
'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU', 'ExpMomentumEMA',
'inverse_sigmoid'
]
|
"""
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of two
different component analysis techniques: :ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t variables with a low number of degrees of freedom (top
left figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Generate sample data
# --------------------
import numpy as np
from sklearn.decomposition import PCA, FastICA
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.0
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng, whiten="arbitrary-variance")
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
# %%
# Plot results
# ------------
import matplotlib.pyplot as plt
def plot_samples(S, axis_list=None):
plt.scatter(
S[:, 0], S[:, 1], s=2, marker="o", zorder=10, color="steelblue", alpha=0.5
)
if axis_list is not None:
for axis, color, label in axis_list:
x_axis, y_axis = axis / axis.std()
plt.quiver(
(0, 0),
(0, 0),
x_axis,
y_axis,
zorder=11,
width=0.01,
scale=6,
color=color,
label=label,
)
plt.hlines(0, -5, 5, color="black", linewidth=0.5)
plt.vlines(0, -3, 3, color="black", linewidth=0.5)
plt.xlim(-5, 5)
plt.ylim(-3, 3)
plt.gca().set_aspect("equal")
plt.xlabel("x")
plt.ylabel("y")
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title("True Independent Sources")
axis_list = [(pca.components_.T, "orange", "PCA"), (ica.mixing_, "red", "ICA")]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(loc="upper left")
legend.set_zorder(100)
plt.title("Observations")
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_))
plt.title("PCA recovered signals")
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title("ICA recovered signals")
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.tight_layout()
plt.show()
|
"""
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of two
different component analysis techniques: :ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t variables with a low number of degrees of freedom (top
left figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Generate sample data
# --------------------
import numpy as np
from sklearn.decomposition import PCA, FastICA
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.0
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng, whiten="arbitrary-variance")
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
# %%
# Plot results
# ------------
import matplotlib.pyplot as plt
def plot_samples(S, axis_list=None):
plt.scatter(
S[:, 0], S[:, 1], s=2, marker="o", zorder=10, color="steelblue", alpha=0.5
)
if axis_list is not None:
for axis, color, label in axis_list:
axis /= axis.std()
x_axis, y_axis = axis
plt.quiver(
(0, 0),
(0, 0),
x_axis,
y_axis,
zorder=11,
width=0.01,
scale=6,
color=color,
label=label,
)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel("x")
plt.ylabel("y")
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title("True Independent Sources")
axis_list = [(pca.components_.T, "orange", "PCA"), (ica.mixing_, "red", "ICA")]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(loc="lower right")
legend.set_zorder(100)
plt.title("Observations")
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title("PCA recovered signals")
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title("ICA recovered signals")
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.tight_layout()
plt.show()
|
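As a sanity check on the discussion above, the mixing matrix estimated by FastICA can be compared with the true one; ICA recovers it only up to permutation, sign, and scale. A sketch, not part of the original example:

```python
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.0
A = np.array([[1, 1], [0, 2]])  # true mixing matrix
X = S @ A.T

ica = FastICA(random_state=0, whiten="arbitrary-variance")
ica.fit(X)
# Columns of ica.mixing_ approximate the columns of A up to order, sign and scale.
print(ica.mixing_)
```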
"""
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/external_memory>` for more details.
"""
import os
import tempfile
from typing import Callable, List, Tuple
import numpy as np
from sklearn.datasets import make_regression
import xgboost
def make_batches(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
tmpdir: str,
) -> List[Tuple[str, str]]:
files: List[Tuple[str, str]] = []
rng = np.random.RandomState(1994)
for i in range(n_batches):
X, y = make_regression(n_samples_per_batch, n_features, random_state=rng)
X_path = os.path.join(tmpdir, "X-" + str(i) + ".npy")
y_path = os.path.join(tmpdir, "y-" + str(i) + ".npy")
np.save(X_path, X)
np.save(y_path, y)
files.append((X_path, y_path))
return files
class Iterator(xgboost.DataIter):
"""A custom iterator for loading files in batches."""
def __init__(self, file_paths: List[Tuple[str, str]]):
self._file_paths = file_paths
self._it = 0
# XGBoost will generate some cache files under current directory with the prefix
# "cache"
super().__init__(cache_prefix=os.path.join(".", "cache"))
def load_file(self) -> Tuple[np.ndarray, np.ndarray]:
X_path, y_path = self._file_paths[self._it]
X = np.load(X_path)
y = np.load(y_path)
assert X.shape[0] == y.shape[0]
return X, y
def next(self, input_data: Callable) -> int:
"""Advance the iterator by 1 step and pass the data to XGBoost. This function is
called by XGBoost during the construction of ``DMatrix``
"""
if self._it == len(self._file_paths):
# return 0 to let XGBoost know this is the end of iteration
return 0
# input_data is a function passed in by XGBoost that has a similar signature to
# the ``DMatrix`` constructor.
X, y = self.load_file()
input_data(data=X, label=y)
self._it += 1
return 1
def reset(self) -> None:
"""Reset the iterator to its beginning"""
self._it = 0
def main(tmpdir: str) -> xgboost.Booster:
# generate some random data for demo
files = make_batches(1024, 17, 31, tmpdir)
it = Iterator(files)
# For non-data arguments, specify it here once instead of passing them by the `next`
# method.
missing = np.NaN
Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)
# ``approx`` is also supported, but less efficient due to sketching. GPU behaves
# differently than CPU tree methods as it uses a hybrid approach. See tutorial in
# doc for details.
booster = xgboost.train(
{"tree_method": "hist", "max_depth": 4},
Xy,
evals=[(Xy, "Train")],
num_boost_round=10,
)
return booster
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmpdir:
main(tmpdir)
|
"""
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/external_memory>` for more details.
"""
import os
import tempfile
from typing import Callable, List, Tuple
import numpy as np
from sklearn.datasets import make_regression
import xgboost
def make_batches(
n_samples_per_batch: int, n_features: int, n_batches: int, tmpdir: str,
) -> List[Tuple[str, str]]:
files: List[Tuple[str, str]] = []
rng = np.random.RandomState(1994)
for i in range(n_batches):
X, y = make_regression(n_samples_per_batch, n_features, random_state=rng)
X_path = os.path.join(tmpdir, "X-" + str(i) + ".npy")
y_path = os.path.join(tmpdir, "y-" + str(i) + ".npy")
np.save(X_path, X)
np.save(y_path, y)
files.append((X_path, y_path))
return files
class Iterator(xgboost.DataIter):
"""A custom iterator for loading files in batches."""
def __init__(self, file_paths: List[Tuple[str, str]]):
self._file_paths = file_paths
self._it = 0
# XGBoost will generate some cache files under current directory with the prefix
# "cache"
super().__init__(cache_prefix=os.path.join(".", "cache"))
def load_file(self) -> Tuple[np.ndarray, np.ndarray]:
X_path, y_path = self._file_paths[self._it]
X = np.load(X_path)
y = np.load(y_path)
assert X.shape[0] == y.shape[0]
return X, y
def next(self, input_data: Callable) -> int:
"""Advance the iterator by 1 step and pass the data to XGBoost. This function is
called by XGBoost during the construction of ``DMatrix``
"""
if self._it == len(self._file_paths):
# return 0 to let XGBoost know this is the end of iteration
return 0
# input_data is a function passed in by XGBoost that has a similar signature to
# the ``DMatrix`` constructor.
X, y = self.load_file()
input_data(data=X, label=y)
self._it += 1
return 1
def reset(self) -> None:
"""Reset the iterator to its beginning"""
self._it = 0
def main(tmpdir: str) -> xgboost.Booster:
# generate some random data for demo
files = make_batches(1024, 17, 31, tmpdir)
it = Iterator(files)
# For non-data arguments, specify it here once instead of passing them by the `next`
# method.
missing = np.NaN
Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)
# ``approx`` is also supported, but less efficient due to sketching. GPU behaves
# differently than CPU tree methods as it uses a hybrid approach. See tutorial in
# doc for details.
booster = xgboost.train(
{"tree_method": "hist", "max_depth": 4},
Xy,
evals=[(Xy, "Train")],
num_boost_round=10,
)
return booster
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmpdir:
main(tmpdir)
|
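A hedged usage sketch of the iterator pattern above: once training finishes, the same external-memory `DMatrix` can also be used for prediction. This reuses `make_batches` and `Iterator` exactly as defined above; the batch sizes are illustrative:

```python
import tempfile

import numpy as np
import xgboost

with tempfile.TemporaryDirectory() as tmpdir:
    files = make_batches(256, 17, 4, tmpdir)  # smaller batches, for illustration only
    it = Iterator(files)
    Xy = xgboost.DMatrix(it, missing=np.nan, enable_categorical=False)
    booster = xgboost.train({"tree_method": "hist", "max_depth": 4}, Xy, num_boost_round=5)
    preds = booster.predict(Xy)  # prediction streams the batches again
    print(preds.shape)
```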
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)["decoded_1"]
print(f"\nTop tokens {top_k} for each text:")
# The result is a list of sentence embeddings as numpy arrays
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
|
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
import numpy as np
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
"""
# Visualize top tokens for each text
top_k = 10
print(f"\nTop tokens {top_k} for each text:")
# The result is a list of sentence embeddings as numpy arrays
for i, sentence in enumerate(sentences):
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in zip(top_tokens, top_values)])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
|
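Because the embeddings above are sparse torch tensors, pairwise similarities can be computed after densifying them. A small sketch that assumes `sentences` and `embeddings` from the snippet above:

```python
import torch

# Densify each row and stack, then take dot products between all pairs.
dense = torch.stack([embeddings[i].to_dense() for i in range(len(sentences))])
similarities = dense @ dense.T
print(similarities)
```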
from typing import TYPE_CHECKING
from docarray.array.storage.qdrant.backend import BackendMixin, QdrantConfig
from docarray.array.storage.qdrant.find import FindMixin
from docarray.array.storage.qdrant.getsetdel import GetSetDelMixin
from docarray.array.storage.qdrant.helper import DISTANCES
from docarray.array.storage.qdrant.seqlike import SequenceLikeMixin
__all__ = ['StorageMixins', 'QdrantConfig']
if TYPE_CHECKING:
from qdrant_client import QdrantClient
from qdrant_client.http.models.models import Distance
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin):
@property
def serialize_config(self) -> dict:
return self._config.serialize_config
@property
def distance(self) -> 'Distance':
return DISTANCES[self._config.distance]
@property
def serialization_config(self) -> dict:
return self._serialize_config
@property
def n_dim(self) -> int:
return self._n_dim
@property
def collection_name(self) -> str:
return self._config.collection_name
@property
def collection_name_meta(self) -> str:
return f'{self.collection_name}_meta'
@property
def config(self):
return self._config
@property
def client(self) -> 'QdrantClient':
return self._client
@property
def scroll_batch_size(self) -> int:
return self._config.scroll_batch_size
|
from typing import TYPE_CHECKING
from .backend import BackendMixin, QdrantConfig
from .find import FindMixin
from .getsetdel import GetSetDelMixin
from .helper import DISTANCES
from .seqlike import SequenceLikeMixin
__all__ = ['StorageMixins', 'QdrantConfig']
if TYPE_CHECKING:
from qdrant_client import QdrantClient
from qdrant_client.http.models.models import Distance
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin):
@property
def serialize_config(self) -> dict:
return self._config.serialize_config
@property
def distance(self) -> 'Distance':
return DISTANCES[self._config.distance]
@property
def serialization_config(self) -> dict:
return self._serialize_config
@property
def n_dim(self) -> int:
return self._n_dim
@property
def collection_name(self) -> str:
return self._config.collection_name
@property
def collection_name_meta(self) -> str:
return f'{self.collection_name}_meta'
@property
def config(self):
return self._config
@property
def client(self) -> 'QdrantClient':
return self._client
@property
def scroll_batch_size(self) -> int:
return self._config.scroll_batch_size
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# attempt to fix the fork error on macOS; this seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.32.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
with many plot generators executing in parallel, versus the Ubuntu default of ulimit -n 1024
or the OS X El Capitan default of 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# attempt to fix the fork error on macOS; this seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.31.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
with many plot generators executing in parallel, versus the Ubuntu default of ulimit -n 1024
or the OS X El Capitan default of 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
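For orientation, a minimal sketch of how the first-class citizens exported above fit together; the Executor name and texts are illustrative:

```python
from jina import Document, DocumentArray, Executor, Flow, requests

class Echo(Executor):
    @requests
    def echo(self, docs: DocumentArray, **kwargs):
        for doc in docs:
            doc.text = f'echo: {doc.text}'

with Flow().add(uses=Echo) as flow:
    result = flow.post(on='/', inputs=DocumentArray([Document(text='hello')]))
    print(result[0].text)
```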
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class WriteFileInput(BaseModel):
"""Input for WriteFileTool."""
file_path: str = Field(..., description="name of file")
text: str = Field(..., description="text to write to file")
append: bool = Field(
default=False, description="Whether to append to an existing file."
)
class WriteFileTool(BaseFileToolMixin, BaseTool):
"""Tool that writes a file to disk."""
name: str = "write_file"
args_schema: Type[BaseModel] = WriteFileInput
description: str = "Write file to disk"
def _run(
self,
file_path: str,
text: str,
append: bool = False,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
write_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
try:
write_path.parent.mkdir(exist_ok=True, parents=False)
mode = "a" if append else "w"
with write_path.open(mode, encoding="utf-8") as f:
f.write(text)
return f"File written successfully to {file_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class WriteFileInput(BaseModel):
"""Input for WriteFileTool."""
file_path: str = Field(..., description="name of file")
text: str = Field(..., description="text to write to file")
append: bool = Field(
default=False, description="Whether to append to an existing file."
)
class WriteFileTool(BaseFileToolMixin, BaseTool): # type: ignore[override, override]
"""Tool that writes a file to disk."""
name: str = "write_file"
args_schema: Type[BaseModel] = WriteFileInput
description: str = "Write file to disk"
def _run(
self,
file_path: str,
text: str,
append: bool = False,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
write_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
try:
write_path.parent.mkdir(exist_ok=True, parents=False)
mode = "a" if append else "w"
with write_path.open(mode, encoding="utf-8") as f:
f.write(text)
return f"File written successfully to {file_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
|
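A hedged usage sketch of the tool above; `root_dir` comes from `BaseFileToolMixin`, and the directory, file name, and text are illustrative:

```python
from langchain_community.tools import WriteFileTool

tool = WriteFileTool(root_dir="/tmp/write_file_demo")  # confine writes to this directory
result = tool.invoke({"file_path": "notes.txt", "text": "hello", "append": False})
print(result)  # e.g. "File written successfully to notes.txt."
```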
from typing import Literal
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ToolsIntegrationTests
from langchain_tests.unit_tests import ToolsUnitTests
class ParrotMultiplyTool(BaseTool):
name: str = "ParrotMultiplyTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
def _run(self, a: int, b: int) -> int:
return a * b + 80
class ParrotMultiplyArtifactTool(BaseTool):
name: str = "ParrotMultiplyArtifactTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
response_format: Literal["content_and_artifact"] = "content_and_artifact"
def _run(self, a: int, b: int) -> tuple[int, str]:
return a * b + 80, "parrot artifact"
class TestParrotMultiplyToolUnit(ToolsUnitTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyTool]:
return ParrotMultiplyTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyTool]:
return ParrotMultiplyTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyArtifactToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyArtifactTool]:
return ParrotMultiplyArtifactTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
|
from typing import Literal
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ToolsIntegrationTests
from langchain_tests.unit_tests import ToolsUnitTests
class ParrotMultiplyTool(BaseTool): # type: ignore
name: str = "ParrotMultiplyTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
def _run(self, a: int, b: int) -> int:
return a * b + 80
class ParrotMultiplyArtifactTool(BaseTool): # type: ignore
name: str = "ParrotMultiplyArtifactTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
response_format: Literal["content_and_artifact"] = "content_and_artifact"
def _run(self, a: int, b: int) -> tuple[int, str]:
return a * b + 80, "parrot artifact"
class TestParrotMultiplyToolUnit(ToolsUnitTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyTool]:
return ParrotMultiplyTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyTool]:
return ParrotMultiplyTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyArtifactToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyArtifactTool]:
return ParrotMultiplyArtifactTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
|
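If a tool did take constructor arguments, the comments above say to surface them through `tool_constructor_params`; a hedged sketch with an illustrative `offset` field, reusing the imports at the top of this file:

```python
class ParrotMultiplyOffsetTool(BaseTool):
    # Illustrative variant with a constructor argument; not part of the suite above.
    name: str = "ParrotMultiplyOffsetTool"
    description: str = "Multiply two numbers like a parrot and add a configurable offset."
    offset: int = 80

    def _run(self, a: int, b: int) -> int:
        return a * b + self.offset

class TestParrotMultiplyOffsetToolUnit(ToolsUnitTests):
    @property
    def tool_constructor(self) -> type[ParrotMultiplyOffsetTool]:
        return ParrotMultiplyOffsetTool

    @property
    def tool_constructor_params(self) -> dict:
        # Passed to ParrotMultiplyOffsetTool(...) by the standard test harness.
        return {"offset": 80}

    @property
    def tool_invoke_params_example(self) -> dict:
        return {"a": 2, "b": 3}
```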
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from sentence_encoder import TransformerSentenceEncoder
_EMBEDDING_DIM = 384
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=TransformerSentenceEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...sentence_encoder import TransformerSentenceEncoder
_EMBEDDING_DIM = 384
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=TransformerSentenceEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
import os
from argparse import ArgumentParser
import mmcv
import requests
import torch
from mmengine.structures import InstanceData
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
parser.add_argument(
'--work-dir',
type=str,
default=None,
help='output directory to save drawn results.')
args = parser.parse_args()
return args
def align_ts_output(inputs, metainfo, device):
bboxes = []
labels = []
scores = []
for i, pred in enumerate(inputs):
bboxes.append(pred['bbox'])
labels.append(pred['class_label'])
scores.append(pred['score'])
pred_instances = InstanceData(metainfo=metainfo)
pred_instances.bboxes = torch.tensor(
bboxes, dtype=torch.float32, device=device)
pred_instances.labels = torch.tensor(
labels, dtype=torch.int64, device=device)
pred_instances.scores = torch.tensor(
scores, dtype=torch.float32, device=device)
ts_data_sample = DetDataSample(pred_instances=pred_instances)
return ts_data_sample
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
pytorch_results = inference_detector(model, args.img)
keep = pytorch_results.pred_instances.scores >= args.score_thr
pytorch_results.pred_instances = pytorch_results.pred_instances[keep]
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
pt_out_file = None
ts_out_file = None
if args.work_dir is not None:
os.makedirs(args.work_dir, exist_ok=True)
pt_out_file = os.path.join(args.work_dir, 'pytorch_result.png')
ts_out_file = os.path.join(args.work_dir, 'torchserve_result.png')
visualizer.add_datasample(
'pytorch_result',
img.copy(),
data_sample=pytorch_results,
draw_gt=False,
out_file=pt_out_file,
show=True,
wait_time=0)
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
response = requests.post(url, image)
metainfo = pytorch_results.pred_instances.metainfo
ts_results = align_ts_output(response.json(), metainfo, args.device)
visualizer.add_datasample(
'torchserve_result',
img,
data_sample=ts_results,
draw_gt=False,
out_file=ts_out_file,
show=True,
wait_time=0)
assert torch.allclose(pytorch_results.pred_instances.bboxes,
ts_results.pred_instances.bboxes)
assert torch.allclose(pytorch_results.pred_instances.labels,
ts_results.pred_instances.labels)
assert torch.allclose(pytorch_results.pred_instances.scores,
ts_results.pred_instances.scores)
if __name__ == '__main__':
args = parse_args()
main(args)
|
import os
from argparse import ArgumentParser
import mmcv
import requests
import torch
from mmengine.structures import InstanceData
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
parser.add_argument(
'--work-dir',
type=str,
default=None,
help='output directory to save drawn results.')
args = parser.parse_args()
return args
def align_ts_output(inputs, metainfo, device):
bboxes = []
labels = []
scores = []
for i, pred in enumerate(inputs):
bboxes.append(pred['bbox'])
labels.append(pred['class_label'])
scores.append(pred['score'])
pred_instances = InstanceData(metainfo=metainfo)
pred_instances.bboxes = torch.tensor(
bboxes, dtype=torch.float32, device=device)
pred_instances.labels = torch.tensor(
labels, dtype=torch.int64, device=device)
pred_instances.scores = torch.tensor(
scores, dtype=torch.float32, device=device)
ts_data_sample = DetDataSample(pred_instances=pred_instances)
return ts_data_sample
def main(args):
register_all_modules()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
pytorch_results = inference_detector(model, args.img)
keep = pytorch_results.pred_instances.scores >= args.score_thr
pytorch_results.pred_instances = pytorch_results.pred_instances[keep]
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
pt_out_file = None
ts_out_file = None
if args.work_dir is not None:
os.makedirs(args.work_dir, exist_ok=True)
pt_out_file = os.path.join(args.work_dir, 'pytorch_result.png')
ts_out_file = os.path.join(args.work_dir, 'torchserve_result.png')
visualizer.add_datasample(
'pytorch_result',
img.copy(),
data_sample=pytorch_results,
draw_gt=False,
out_file=pt_out_file,
show=True,
wait_time=0)
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
response = requests.post(url, image)
metainfo = pytorch_results.pred_instances.metainfo
ts_results = align_ts_output(response.json(), metainfo, args.device)
visualizer.add_datasample(
'torchserve_result',
img,
data_sample=ts_results,
draw_gt=False,
out_file=ts_out_file,
show=True,
wait_time=0)
assert torch.allclose(pytorch_results.pred_instances.bboxes,
ts_results.pred_instances.bboxes)
assert torch.allclose(pytorch_results.pred_instances.labels,
ts_results.pred_instances.labels)
assert torch.allclose(pytorch_results.pred_instances.scores,
ts_results.pred_instances.scores)
if __name__ == '__main__':
args = parse_args()
main(args)
|
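For reference, a hedged sketch of the response shape `align_ts_output` expects from the TorchServe endpoint (values are made up); it is converted into a `DetDataSample` exactly as in the script above:

```python
fake_response = [
    {"class_label": 0, "bbox": [10.0, 20.0, 110.0, 220.0], "score": 0.92},
    {"class_label": 2, "bbox": [50.0, 60.0, 150.0, 260.0], "score": 0.71},
]
sample = align_ts_output(fake_response, metainfo={}, device="cpu")
print(sample.pred_instances.bboxes.shape)  # torch.Size([2, 4])
```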
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SearchEmails
from langchain_community.tools.office365.messages_search import SearchEmailsInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearchEmailsInput": "langchain_community.tools.office365.messages_search",
"O365SearchEmails": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365SearchEmails",
"SearchEmailsInput",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SearchEmails
from langchain_community.tools.office365.messages_search import SearchEmailsInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearchEmailsInput": "langchain_community.tools.office365.messages_search",
"O365SearchEmails": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SearchEmailsInput",
"O365SearchEmails",
]
|
import base64
import hashlib
from datetime import datetime, timedelta, timezone
import os
import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
Encoding,
PublicFormat,
load_pem_private_key,
)
SPCS_TOKEN_PATH = "/snowflake/session/token"
def get_default_spcs_token() -> str:
"""
Returns the value of the Snowpark default OAuth session token (a JWT).
In a Snowpark Container Services environment, there is a 'default' OAuth session token. This retrieves it for you (as a string).
"""
with open(SPCS_TOKEN_PATH) as fp:
return fp.read()
def is_spcs_environment() -> bool:
"""
Determines whether we're currently in an SPCS (Snowpark Container Services) environment by checking for the default session token and the Snowflake host/account environment variables.
Returns a boolean: whether or not we're in an SPCS environment.
"""
return (
os.path.exists(SPCS_TOKEN_PATH)
and os.environ.get("SNOWFLAKE_HOST") is not None
and os.environ.get("SNOWFLAKE_ACCOUNT") is not None
)
def get_spcs_base_url() -> str:
"""
Returns a correctly formatted base URL for making Snowflake API calls from within an SPCS environment.
Raises a ValueError if not in an SPCS environment.
Returns a string of the form https://{some-url} to which you can append an API endpoint such as Cortex.
"""
if not is_spcs_environment():
raise ValueError("Cannot call get_spcs_base_url unless in an spcs environment.")
return "https://" + os.environ.get("SNOWFLAKE_HOST").replace(
"snowflake",
os.environ.get("SNOWFLAKE_ACCOUNT").lower().replace("_", "-"),
1,
)
def generate_sf_jwt(sf_account: str, sf_user: str, sf_private_key_filepath: str) -> str:
"""
Generate a JSON Web Token for a Snowflake user.
Args:
sf_account: Fully qualified snowflake account name (ORG_ID-ACCOUNT_ID).
sf_user: User to generate token for.
sf_private_key_filepath: Path to user's private key.
Returns:
str: JSON Web Token
"""
with open(sf_private_key_filepath, "rb") as pem_in:
pemlines = pem_in.read()
# TODO: Add support for encrypted private keys
private_key = load_pem_private_key(pemlines, None, default_backend())
# Get the raw bytes of the public key.
public_key_raw = private_key.public_key().public_bytes(
Encoding.DER, PublicFormat.SubjectPublicKeyInfo
)
# Get the sha256 hash of the raw bytes.
sha256hash = hashlib.sha256()
sha256hash.update(public_key_raw)
# Base64-encode the value and prepend the prefix 'SHA256:'.
public_key_fp = "SHA256:" + base64.b64encode(sha256hash.digest()).decode("utf-8")
# Use uppercase for the account identifier and user name.
account = sf_account.upper()
user = sf_user.upper()
qualified_username = account + "." + user
# Get the current time in order to specify the time when the JWT was issued and the expiration time of the JWT.
now = datetime.now(timezone.utc)
# Specify the length of time during which the JWT will be valid. You can specify at most 1 hour.
lifetime = timedelta(minutes=59)
# Create the payload for the token.
payload = {
# Set the issuer to the fully qualified username concatenated with the public key fingerprint (calculated in the previous step).
"iss": qualified_username + "." + public_key_fp,
# Set the subject to the fully qualified username.
"sub": qualified_username,
# Set the issue time to now.
"iat": now,
# Set the expiration time, based on the lifetime specified for this object.
"exp": now + lifetime,
}
# Generate the JWT. private_key is the private key that you read from the private key file in the previous step when you generated the public key fingerprint.
encoding_algorithm = "RS256"
return jwt.encode(payload, key=private_key, algorithm=encoding_algorithm)
|
import base64
import hashlib
from datetime import datetime, timedelta, timezone
import os
import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
Encoding,
PublicFormat,
load_pem_private_key,
)
SPCS_TOKEN_PATH = "/snowflake/session/token"
def get_default_spcs_token() -> str:
"""
    Returns the value of the Snowpark default OAuth session token (a JWT).
    In a Snowpark Container Services environment, a default OAuth session token is mounted at a well-known path; this reads it and returns it as a string.
"""
with open(SPCS_TOKEN_PATH) as fp:
return fp.read()
def is_spcs_environment() -> bool:
"""
    Determines whether we're currently in an SPCS (Snowpark Container Services) environment by checking for the default session token and the SNOWFLAKE_HOST/SNOWFLAKE_ACCOUNT environment variables.
Returns a boolean: whether or not we're in an SPCS environment.
"""
return (
os.path.exists(SPCS_TOKEN_PATH)
and os.environ.get("SNOWFLAKE_HOST") is not None
and os.environ.get("SNOWFLAKE_ACCOUNT") is not None
)
def get_spcs_base_url() -> str:
"""
Returns a correctly formatted URL for making Snowflake API calls from within an SPCS environment.
Raises a ValueError if not in an SPCS environment.
    Returns a string of the form https://{host} to which an API endpoint such as Cortex can be appended.
"""
if not is_spcs_environment():
raise ValueError("Cannot call get_spcs_base_url unless in an spcs environment.")
return "https://" + os.environ.get("SNOWFLAKE_HOST").replace(
"snowflake",
os.environ.get("SNOWFLAKE_ACCOUNT").lower().replace("_", "-"),
1,
)
def generate_sf_jwt(sf_account: str, sf_user: str, sf_private_key_filepath: str) -> str:
"""
Generate a JSON Web Token for a Snowflake user.
Args:
sf_account: Fully qualified snowflake account name (ORG_ID-ACCOUNT_ID).
sf_user: User to generate token for.
sf_private_key_filepath: Path to user's private key.
Returns:
str: JSON Web Token
"""
with open(sf_private_key_filepath, "rb") as pem_in:
pemlines = pem_in.read()
# TODO: Add support for encrypted private keys
private_key = load_pem_private_key(pemlines, None, default_backend())
# Get the raw bytes of the public key.
public_key_raw = private_key.public_key().public_bytes(
Encoding.DER, PublicFormat.SubjectPublicKeyInfo
)
# Get the sha256 hash of the raw bytes.
sha256hash = hashlib.sha256()
sha256hash.update(public_key_raw)
# Base64-encode the value and prepend the prefix 'SHA256:'.
public_key_fp = "SHA256:" + base64.b64encode(sha256hash.digest()).decode("utf-8")
# Use uppercase for the account identifier and user name.
account = sf_account.upper()
user = sf_user.upper()
qualified_username = account + "." + user
# Get the current time in order to specify the time when the JWT was issued and the expiration time of the JWT.
now = datetime.now(timezone.utc)
# Specify the length of time during which the JWT will be valid. You can specify at most 1 hour.
lifetime = timedelta(minutes=59)
# Create the payload for the token.
payload = {
# Set the issuer to the fully qualified username concatenated with the public key fingerprint (calculated in the previous step).
"iss": qualified_username + "." + public_key_fp,
# Set the subject to the fully qualified username.
"sub": qualified_username,
# Set the issue time to now.
"iat": now,
# Set the expiration time, based on the lifetime specified for this object.
"exp": now + lifetime,
}
# Generate the JWT. private_key is the private key that you read from the private key file in the previous step when you generated the public key fingerprint.
encoding_algorithm = "RS256"
return jwt.encode(payload, key=private_key, algorithm=encoding_algorithm)
|
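The pair above covers two Snowflake auth paths: reusing the ambient session token when running inside Snowpark Container Services, and minting a short-lived key-pair JWT otherwise. A minimal usage sketch, with placeholder account, user, and key-file values:
# Hypothetical caller; every identifier below is a placeholder, not a real credential.
if is_spcs_environment():
    # Inside SPCS: read the injected OAuth session token and build the API base URL.
    token = get_default_spcs_token()
    base_url = get_spcs_base_url()  # "https://<account-host>", ready to prefix endpoints such as Cortex
else:
    # Outside SPCS: generate a key-pair JWT valid for just under one hour.
    token = generate_sf_jwt(
        sf_account="MYORG-MYACCOUNT",
        sf_user="MY_USER",
        sf_private_key_filepath="/path/to/rsa_key.p8",
    )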
import os
import time
import pytest
import requests
from docarray import Document
from jina import Client, Flow
from jina.helper import random_port
from jina.serve.runtimes.servers import BaseServer
from tests.integration.multiple_protocol_gateway.gateway.multiprotocol_gateway import (
MultiProtocolGateway,
)
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def multi_port_gateway_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'gateway/'), tag='multiprotocol-gateway'
)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize(
'uses',
[
'MultiProtocolGateway',
'docker://multiprotocol-gateway',
],
)
@pytest.mark.parametrize('use_stream', [False, True])
def test_multiple_protocols_gateway(
multi_port_gateway_docker_image_built, uses, use_stream
):
http_port = random_port()
grpc_port = random_port()
flow = Flow().config_gateway(
uses=uses, port=[http_port, grpc_port], protocol=['http', 'grpc']
)
assert flow.port == [http_port, grpc_port]
grpc_client = Client(protocol='grpc', port=grpc_port)
with flow:
grpc_client.post('/', inputs=Document(), stream=use_stream)
resp = requests.get(f'http://localhost:{http_port}').json()
assert resp['protocol'] == 'http'
assert BaseServer.is_ready(f'localhost:{grpc_port}')
|
import os
import time
import pytest
import requests
from docarray import Document
from jina import Client, Flow
from jina.helper import random_port
from jina.serve.runtimes.servers import BaseServer
from tests.integration.multiple_protocol_gateway.gateway.multiprotocol_gateway import (
MultiProtocolGateway,
)
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def multi_port_gateway_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'gateway/'), tag='multiprotocol-gateway'
)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize(
'uses',
[
'MultiProtocolGateway',
'docker://multiprotocol-gateway',
],
)
@pytest.mark.parametrize('use_stream', [False, True])
def test_multiple_protocols_gateway(multi_port_gateway_docker_image_built, uses, use_stream):
http_port = random_port()
grpc_port = random_port()
flow = Flow().config_gateway(
uses=uses, port=[http_port, grpc_port], protocol=['http', 'grpc']
)
assert flow.port == [http_port, grpc_port]
grpc_client = Client(protocol='grpc', port=grpc_port)
with flow:
grpc_client.post('/', inputs=Document(), stream=use_stream)
resp = requests.get(f'http://localhost:{http_port}').json()
assert resp['protocol'] == 'http'
assert BaseServer.is_ready(f'localhost:{grpc_port}')
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from .. import AnnoySearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
def test_simple_annoy():
from annoy import AnnoyIndex
_index = AnnoyIndex(5, 'angular')
for j in range(3):
_index.add_item(j, np.random.random((5,)))
_index.build(4)
idx1, _ = _index.get_nns_by_vector(
np.random.random((5,)), 3, include_distances=True
)
assert len(idx1) == 3
@pytest.mark.parametrize(['metric', 'is_distance'],
[('angular', True), ('euclidean', True), ('manhattan', True), ('hamming', True),
('dot', True), ('angular', False), ('euclidean', False), ('manhattan', False),
('hamming', False), ('dot', False)])
def test_metric(tmpdir, metric, is_distance):
metas = {'workspace': str(tmpdir), 'name': 'searcher', }
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, metric=metric, is_distance=is_distance, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert docs[0].matches[i].scores[metric].value >= docs[0].matches[i + 1].scores[metric].value
else:
assert docs[0].matches[i].scores[metric].value <= docs[0].matches[i + 1].scores[metric].value
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from .. import AnnoySearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
def test_simple_annoy():
from annoy import AnnoyIndex
_index = AnnoyIndex(5, 'angular')
for j in range(3):
_index.add_item(j, np.random.random((5,)))
_index.build(4)
idx1, _ = _index.get_nns_by_vector(
np.random.random((5,)), 3, include_distances=True
)
assert len(idx1) == 3
@pytest.mark.parametrize(['metric', 'is_distance'],
[('angular', True), ('euclidean', True), ('manhattan', True), ('hamming', True),
('dot', True), ('angular', False), ('euclidean', False), ('manhattan', False),
('hamming', False), ('dot', False)])
def test_metric(tmpdir, metric, is_distance):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, metric=metric, is_distance=is_distance)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert docs[0].matches[i].scores[metric].value >= docs[0].matches[i + 1].scores[metric].value
else:
assert docs[0].matches[i].scores[metric].value <= docs[0].matches[i + 1].scores[metric].value
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(default_top_k=TOP_K, metas=metas)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
|
"""Sentence Transformer Finetuning Engine."""
import os
from typing import Any, Optional
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.embeddings.utils import resolve_embed_model
from llama_index.finetuning.embeddings.common import EmbeddingQAFinetuneDataset
from llama_index.finetuning.types import BaseEmbeddingFinetuneEngine
class SentenceTransformersFinetuneEngine(BaseEmbeddingFinetuneEngine):
"""Sentence Transformers Finetune Engine."""
def __init__(
self,
dataset: EmbeddingQAFinetuneDataset,
model_id: str = "BAAI/bge-small-en",
model_output_path: str = "exp_finetune",
batch_size: int = 10,
val_dataset: Optional[EmbeddingQAFinetuneDataset] = None,
loss: Optional[Any] = None,
epochs: int = 2,
show_progress_bar: bool = True,
evaluation_steps: int = 50,
use_all_docs: bool = False,
trust_remote_code: bool = False,
device: Optional[Any] = None,
save_checkpoints: bool = False,
resume_from_checkpoint: bool = False,
checkpoint_save_steps: int = 500,
checkpoint_save_total_limit: int = 0,
) -> None:
"""Init params."""
from sentence_transformers import InputExample, SentenceTransformer, losses
from torch.utils.data import DataLoader
self.dataset = dataset
self.model_id = model_id
self.model_output_path = model_output_path
self.model = SentenceTransformer(
model_id, trust_remote_code=trust_remote_code, device=device
)
self.use_all_docs = use_all_docs
examples: Any = []
for query_id, query in dataset.queries.items():
if use_all_docs:
for node_id in dataset.relevant_docs[query_id]:
text = dataset.corpus[node_id]
example = InputExample(texts=[query, text])
examples.append(example)
else:
node_id = dataset.relevant_docs[query_id][0]
text = dataset.corpus[node_id]
example = InputExample(texts=[query, text])
examples.append(example)
self.examples = examples
self.loader: DataLoader = DataLoader(examples, batch_size=batch_size)
# define evaluator
from sentence_transformers.evaluation import InformationRetrievalEvaluator
evaluator: Optional[InformationRetrievalEvaluator] = None
if val_dataset is not None:
evaluator = InformationRetrievalEvaluator(
val_dataset.queries, val_dataset.corpus, val_dataset.relevant_docs
)
self.evaluator = evaluator
# define loss
self.loss = loss or losses.MultipleNegativesRankingLoss(self.model)
self.epochs = epochs
self.show_progress_bar = show_progress_bar
self.evaluation_steps = evaluation_steps
self.warmup_steps = int(len(self.loader) * epochs * 0.1)
self.checkpoint_path = (
os.path.join("checkpoints", model_output_path) if save_checkpoints else None
)
self.resume_from_checkpoint = resume_from_checkpoint
self.checkpoint_save_steps = checkpoint_save_steps
self.checkpoint_save_total_limit = checkpoint_save_total_limit
def finetune(self, **train_kwargs: Any) -> None:
"""Finetune model."""
self.model.fit(
train_objectives=[(self.loader, self.loss)],
epochs=self.epochs,
warmup_steps=self.warmup_steps,
output_path=self.model_output_path,
show_progress_bar=self.show_progress_bar,
evaluator=self.evaluator,
evaluation_steps=self.evaluation_steps,
checkpoint_path=self.checkpoint_path,
resume_from_checkpoint=self.resume_from_checkpoint,
checkpoint_save_steps=self.checkpoint_save_steps,
checkpoint_save_total_limit=self.checkpoint_save_total_limit,
)
def get_finetuned_model(self, **model_kwargs: Any) -> BaseEmbedding:
"""Gets finetuned model."""
embed_model_str = "local:" + self.model_output_path
return resolve_embed_model(embed_model_str)
|
"""Sentence Transformer Finetuning Engine."""
from typing import Any, Optional
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.embeddings.utils import resolve_embed_model
from llama_index.finetuning.embeddings.common import (
EmbeddingQAFinetuneDataset,
)
from llama_index.finetuning.types import BaseEmbeddingFinetuneEngine
class SentenceTransformersFinetuneEngine(BaseEmbeddingFinetuneEngine):
"""Sentence Transformers Finetune Engine."""
def __init__(
self,
dataset: EmbeddingQAFinetuneDataset,
model_id: str = "BAAI/bge-small-en",
model_output_path: str = "exp_finetune",
batch_size: int = 10,
val_dataset: Optional[EmbeddingQAFinetuneDataset] = None,
loss: Optional[Any] = None,
epochs: int = 2,
show_progress_bar: bool = True,
evaluation_steps: int = 50,
use_all_docs: bool = False,
trust_remote_code: bool = False,
device: Optional[Any] = None,
) -> None:
"""Init params."""
from sentence_transformers import InputExample, SentenceTransformer, losses
from torch.utils.data import DataLoader
self.dataset = dataset
self.model_id = model_id
self.model_output_path = model_output_path
self.model = SentenceTransformer(
model_id, trust_remote_code=trust_remote_code, device=device
)
self.use_all_docs = use_all_docs
examples: Any = []
for query_id, query in dataset.queries.items():
if use_all_docs:
for node_id in dataset.relevant_docs[query_id]:
text = dataset.corpus[node_id]
example = InputExample(texts=[query, text])
examples.append(example)
else:
node_id = dataset.relevant_docs[query_id][0]
text = dataset.corpus[node_id]
example = InputExample(texts=[query, text])
examples.append(example)
self.examples = examples
self.loader: DataLoader = DataLoader(examples, batch_size=batch_size)
# define evaluator
from sentence_transformers.evaluation import InformationRetrievalEvaluator
evaluator: Optional[InformationRetrievalEvaluator] = None
if val_dataset is not None:
evaluator = InformationRetrievalEvaluator(
val_dataset.queries, val_dataset.corpus, val_dataset.relevant_docs
)
self.evaluator = evaluator
# define loss
self.loss = loss or losses.MultipleNegativesRankingLoss(self.model)
self.epochs = epochs
self.show_progress_bar = show_progress_bar
self.evaluation_steps = evaluation_steps
self.warmup_steps = int(len(self.loader) * epochs * 0.1)
def finetune(self, **train_kwargs: Any) -> None:
"""Finetune model."""
self.model.fit(
train_objectives=[(self.loader, self.loss)],
epochs=self.epochs,
warmup_steps=self.warmup_steps,
output_path=self.model_output_path,
show_progress_bar=self.show_progress_bar,
evaluator=self.evaluator,
evaluation_steps=self.evaluation_steps,
)
def get_finetuned_model(self, **model_kwargs: Any) -> BaseEmbedding:
"""Gets finetuned model."""
embed_model_str = "local:" + self.model_output_path
return resolve_embed_model(embed_model_str)
|
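Both revisions above share the same driving workflow (one adds checkpointing options on top of the other). A rough usage sketch, assuming the usual llama_index entry points and placeholder dataset files:
# Hypothetical driver; file names are placeholders.
from llama_index.finetuning import SentenceTransformersFinetuneEngine
from llama_index.finetuning.embeddings.common import EmbeddingQAFinetuneDataset

train_dataset = EmbeddingQAFinetuneDataset.from_json("train_dataset.json")
val_dataset = EmbeddingQAFinetuneDataset.from_json("val_dataset.json")

engine = SentenceTransformersFinetuneEngine(
    train_dataset,
    model_id="BAAI/bge-small-en",
    model_output_path="exp_finetune",
    val_dataset=val_dataset,
    epochs=2,
)
engine.finetune()
embed_model = engine.get_finetuned_model()  # resolves "local:exp_finetune"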
import threading
import time
from typing import Union, BinaryIO, TYPE_CHECKING, Generator, Type, Dict, Optional
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class VideoDataMixin:
"""Provide helper functions for :class:`Document` to support video data."""
@classmethod
def generator_from_webcam(
cls: Type['T'],
show_window: bool = True,
window_title: str = 'webcam',
fps: int = 30,
exit_key: int = 27,
exit_event=None,
tags: Optional[Dict] = None,
) -> Generator['T', None, None]:
"""
Create a generator that yields a :class:`Document` object from the webcam.
This feature requires the `opencv-python` package.
        :param show_window: whether to show a preview window of the webcam video
:param window_title: the window title of the preview window
:param fps: expected frames per second, note that this is not guaranteed, as the actual fps depends on the hardware limit
:param exit_key: the key to press to exit the preview window
        :param exit_event: the multiprocessing/threading/asyncio event that, once set, exits the preview window
:param tags: the tags to attach to the document
:return: a generator that yields a :class:`Document` object from a webcam
"""
import cv2
if exit_event is None:
exit_event = threading.Event()
vc = cv2.VideoCapture(0)
prev_frame_time = time.perf_counter()
actual_fps = 0
try:
while not exit_event.is_set():
rval, frame = vc.read()
yield cls(tensor=frame, tags=tags)
key = cv2.waitKey(1000 // (fps + fps - actual_fps))
if show_window:
new_frame_time = time.perf_counter()
actual_fps = int(1 / (new_frame_time - prev_frame_time))
prev_frame_time = new_frame_time
# converting the fps into integer
# putting the FPS count on the frame
cv2.putText(
frame,
f'FPS {actual_fps:0.0f}/{fps}',
(7, 70),
cv2.FONT_HERSHEY_SIMPLEX,
3,
(255, 255, 255),
3,
cv2.LINE_AA,
)
# displaying the frame with fps
cv2.imshow(window_title, frame)
if key == exit_key or not rval:
break
finally:
vc.release()
if show_window:
cv2.destroyWindow(window_title)
def load_uri_to_video_tensor(self: 'T', only_keyframes: bool = False) -> 'T':
"""Convert a :attr:`.uri` to a video ndarray :attr:`.tensor`.
:param only_keyframes: only keep the keyframes in the video
        :return: the Document itself after processing
"""
import av
with av.open(self.uri) as container:
if only_keyframes:
stream = container.streams.video[0]
stream.codec_context.skip_frame = 'NONKEY'
frames = []
for frame in container.decode(video=0):
img = frame.to_image()
frames.append(np.asarray(img))
self.tensor = np.moveaxis(np.stack(frames), 1, 2)
return self
def save_video_tensor_to_file(
self: 'T', file: Union[str, BinaryIO], frame_rate: int = 30, codec: str = 'h264'
) -> 'T':
"""Save :attr:`.tensor` as a video mp4/h264 file.
:param file: The file to open, which can be either a string or a file-like object.
:param frame_rate: frames per second
:param codec: the name of a decoder/encoder
        :return: itself after processing
"""
if (
self.tensor.ndim != 4
or self.tensor.shape[-1] != 3
or self.tensor.dtype != np.uint8
):
raise ValueError(
                f'expects `.tensor` with dtype=uint8, ndim=4 and a last dimension of 3, '
                f'but received {self.tensor.shape} with dtype {self.tensor.dtype}'
)
video_tensor = np.moveaxis(np.clip(self.tensor, 0, 255), 1, 2)
import av
with av.open(file, mode='w') as container:
stream = container.add_stream(codec, rate=frame_rate)
stream.width = self.tensor.shape[1]
stream.height = self.tensor.shape[2]
stream.pix_fmt = 'yuv420p'
for b in video_tensor:
frame = av.VideoFrame.from_ndarray(b, format='rgb24')
for packet in stream.encode(frame):
container.mux(packet)
for packet in stream.encode():
container.mux(packet)
return self
|
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class VideoDataMixin:
"""Provide helper functions for :class:`Document` to support video data."""
def load_uri_to_video_tensor(self: 'T', only_keyframes: bool = False) -> 'T':
"""Convert a :attr:`.uri` to a video ndarray :attr:`.tensor`.
:param only_keyframes: only keep the keyframes in the video
        :return: the Document itself after processing
"""
import av
with av.open(self.uri) as container:
if only_keyframes:
stream = container.streams.video[0]
stream.codec_context.skip_frame = 'NONKEY'
frames = []
for frame in container.decode(video=0):
img = frame.to_image()
frames.append(np.asarray(img))
self.tensor = np.moveaxis(np.stack(frames), 1, 2)
return self
def save_video_tensor_to_file(
self: 'T', file: Union[str, BinaryIO], frame_rate: int = 30, codec: str = 'h264'
) -> 'T':
"""Save :attr:`.tensor` as a video mp4/h264 file.
:param file: The file to open, which can be either a string or a file-like object.
:param frame_rate: frames per second
:param codec: the name of a decoder/encoder
        :return: itself after processing
"""
if (
self.tensor.ndim != 4
or self.tensor.shape[-1] != 3
or self.tensor.dtype != np.uint8
):
raise ValueError(
                f'expects `.tensor` with dtype=uint8, ndim=4 and a last dimension of 3, '
                f'but received {self.tensor.shape} with dtype {self.tensor.dtype}'
)
video_tensor = np.moveaxis(np.clip(self.tensor, 0, 255), 1, 2)
import av
with av.open(file, mode='w') as container:
stream = container.add_stream(codec, rate=frame_rate)
stream.width = self.tensor.shape[1]
stream.height = self.tensor.shape[2]
stream.pix_fmt = 'yuv420p'
for b in video_tensor:
frame = av.VideoFrame.from_ndarray(b, format='rgb24')
for packet in stream.encode(frame):
container.mux(packet)
for packet in stream.encode():
container.mux(packet)
return self
|
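Both revisions above keep the same load/save round trip for video tensors. A short sketch of that round trip, with placeholder file names, assuming the tensor is uint8, four-dimensional, and has 3 channels in the last dimension:
# Hypothetical usage; 'in.mp4' and 'out.mp4' are placeholder paths.
import numpy as np
from docarray import Document

doc = Document(uri='in.mp4')
doc.load_uri_to_video_tensor()            # decode frames from the URI into doc.tensor
doc.tensor = doc.tensor.astype(np.uint8)  # saving requires dtype=uint8, ndim=4, last dim == 3
doc.save_video_tensor_to_file('out.mp4', frame_rate=30, codec='h264')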
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'Mask2FormerHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead'
]
|
_base_ = './cascade-mask-rcnn_hrnetv2p-w32_20e_coco.py'
# model settings
model = dict(
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py'
# model settings
model = dict(
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data which is helpful in case you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
import logging
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets import load_dataset
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data which is helpful in case you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class AlphaDropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_alpha_dropout_basics(self):
self.run_layer_test(
layers.AlphaDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_alpha_dropout_correctness(self):
inputs = np.ones((20, 500)).astype("float32")
layer = layers.AlphaDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)), 1.0, atol=1e-1
)
def test_alpha_dropout_partial_noise_shape_dynamic(self):
inputs = np.ones((20, 5, 10))
layer = layers.AlphaDropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_alpha_dropout_partial_noise_shape_static(self):
inputs = np.ones((20, 5, 10))
layer = layers.AlphaDropout(0.5, noise_shape=(20, 1, 10))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_alpha_dropout_negative_rate(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.AlphaDropout(rate=-0.5)
def test_alpha_dropout_rate_greater_than_one(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.AlphaDropout(rate=1.5)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class AlphaDropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_alpha_dropout_basics(self):
self.run_layer_test(
layers.AlphaDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
)
def test_alpha_dropout_correctness(self):
inputs = np.ones((20, 500)).astype("float32")
layer = layers.AlphaDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)), 1.0, atol=1e-1
)
def test_alpha_dropout_partial_noise_shape_dynamic(self):
inputs = np.ones((20, 5, 10))
layer = layers.AlphaDropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_alpha_dropout_partial_noise_shape_static(self):
inputs = np.ones((20, 5, 10))
layer = layers.AlphaDropout(0.5, noise_shape=(20, 1, 10))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_alpha_dropout_negative_rate(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.AlphaDropout(rate=-0.5)
def test_alpha_dropout_rate_greater_than_one(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.AlphaDropout(rate=1.5)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
"""Dataset for PASCAL VOC."""
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199),
(0, 226, 252), (182, 182, 255), (0, 0, 230), (220, 20, 60),
(163, 255, 0), (0, 82, 0), (3, 95, 161), (0, 80, 100),
(183, 130, 88)]
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
if 'VOC2007' in self.sub_data_root:
self._metainfo['DATASET_TYPE'] = 'VOC2007'
elif 'VOC2012' in self.sub_data_root:
self._metainfo['DATASET_TYPE'] = 'VOC2012'
else:
self._metainfo['DATASET_TYPE'] = None
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
# which means w, h should be computed as 'x2 - x1 + 1` and
# `y2 - y1 + 1`
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
eval_results.move_to_end('mAP', last=False)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
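In the legacy revision directly above, evaluation is driven through the dataset object once detection results have been collected. A hedged sketch of a typical call site (dataset and results are assumed to already exist from a finished test run):
# Hypothetical call; `dataset` is a VOCDataset instance, `results` comes from a test run.
metrics = dataset.evaluate(results, metric='mAP', iou_thr=[0.5, 0.75])
print(metrics['mAP'], metrics['AP50'], metrics['AP75'])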
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.confident_callback import DeepEvalCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DeepEvalCallbackHandler": "langchain_community.callbacks.confident_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DeepEvalCallbackHandler",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.confident_callback import DeepEvalCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DeepEvalCallbackHandler": "langchain_community.callbacks.confident_callback"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DeepEvalCallbackHandler",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .sampler_seed_hook import DistSamplerSeedHook
__all__ = ['Hook', 'IterTimerHook', 'DistSamplerSeedHook']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
__all__ = ['Hook', 'IterTimerHook']
|
import unittest
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoSox,
TorchaudioTestCase,
)
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
The purpose of this test suite is to verify that apply_codec functionalities do not exhibit
abnormal behaviors.
"""
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform, sample_rate, format, True, compression)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
import unittest
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoSox,
TorchaudioTestCase,
)
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
The purpose of this test suite is to verify that apply_codec functionalities do not exhibit
abnormal behaviors.
"""
torch.random.manual_seed(42)
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform, sample_rate, format, True, compression)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
_base_ = './fast-rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './fast_rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from pathlib import Path
from typing import Any, Optional, TypedDict
from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
# traverse path for routes to host (any directory holding a pyproject.toml file)
package_root = Path.cwd() if cwd is None else cwd
visited: set[Path] = set()
while package_root not in visited:
visited.add(package_root)
pyproject_path = package_root / "pyproject.toml"
if pyproject_path.exists():
return package_root
package_root = package_root.parent
msg = "No pyproject.toml found"
raise FileNotFoundError(msg)
class LangServeExport(TypedDict):
"""Fields from pyproject.toml that are relevant to LangServe.
Attributes:
module: The module to import from, tool.langserve.export_module
attr: The attribute to import from the module, tool.langserve.export_attr
package_name: The name of the package, tool.poetry.name
"""
module: str
attr: str
package_name: str
def get_langserve_export(filepath: Path) -> LangServeExport:
with open(filepath) as f:
data: dict[str, Any] = load(f)
try:
module = data["tool"]["langserve"]["export_module"]
attr = data["tool"]["langserve"]["export_attr"]
package_name = data["tool"]["poetry"]["name"]
except KeyError as e:
msg = "Invalid LangServe PyProject.toml"
raise KeyError(msg) from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
|
from pathlib import Path
from typing import Any, Optional, TypedDict
from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
# traverse path for routes to host (any directory holding a pyproject.toml file)
package_root = Path.cwd() if cwd is None else cwd
visited: set[Path] = set()
while package_root not in visited:
visited.add(package_root)
pyproject_path = package_root / "pyproject.toml"
if pyproject_path.exists():
return package_root
package_root = package_root.parent
raise FileNotFoundError("No pyproject.toml found")
class LangServeExport(TypedDict):
"""
Fields from pyproject.toml that are relevant to LangServe
Attributes:
module: The module to import from, tool.langserve.export_module
attr: The attribute to import from the module, tool.langserve.export_attr
package_name: The name of the package, tool.poetry.name
"""
module: str
attr: str
package_name: str
def get_langserve_export(filepath: Path) -> LangServeExport:
with open(filepath) as f:
data: dict[str, Any] = load(f)
try:
module = data["tool"]["langserve"]["export_module"]
attr = data["tool"]["langserve"]["export_attr"]
package_name = data["tool"]["poetry"]["name"]
except KeyError as e:
raise KeyError("Invalid LangServe PyProject.toml") from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
|
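Both revisions above read the same three fields from pyproject.toml. A hedged sketch of the layout they expect and how the two helpers compose (section contents are placeholders):
# Hypothetical usage; the pyproject.toml is assumed to contain something like:
#
#   [tool.langserve]
#   export_module = "my_app.chain"
#   export_attr = "chain"
#
#   [tool.poetry]
#   name = "my-app"
#
root = get_package_root()  # nearest ancestor directory holding a pyproject.toml
export = get_langserve_export(root / "pyproject.toml")
print(export["module"], export["attr"], export["package_name"])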
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
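    # Illustrative walk-through of the masking + logsumexp above (toy numbers,
    # not part of the class): with scaled scores s = [18.0, 4.0] and gold labels
    # [1.0, 0.3], the only (i, j) with labels[i] < labels[j] is (1, 0), so after
    # masking and appending the zero the loss is log(1 + exp(s[1] - s[0]))
    # = log(1 + exp(-14.0)) ~ 0, i.e. correctly ordered pairs contribute almost nothing.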
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMDetection provides 17 registry nodes to support using modules across
projects. Each node is a child of the root registry in MMEngine.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS
from mmengine.registry import DATASETS as MMENGINE_DATASETS
from mmengine.registry import HOOKS as MMENGINE_HOOKS
from mmengine.registry import LOOPS as MMENGINE_LOOPS
from mmengine.registry import METRICS as MMENGINE_METRICS
from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS
from mmengine.registry import MODELS as MMENGINE_MODELS
from mmengine.registry import \
OPTIMIZER_CONSTRUCTORS as MMENGINE_OPTIMIZER_CONSTRUCTORS
from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS
from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS
from mmengine.registry import \
RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS
from mmengine.registry import RUNNERS as MMENGINE_RUNNERS
from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS
from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS
from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS
from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS
from mmengine.registry import \
WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS
from mmengine.registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', parent=MMENGINE_RUNNERS)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry(
'runner constructor', parent=MMENGINE_RUNNER_CONSTRUCTORS)
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop', parent=MMENGINE_LOOPS)
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook', parent=MMENGINE_HOOKS)
# manage data-related modules
DATASETS = Registry('dataset', parent=MMENGINE_DATASETS)
DATA_SAMPLERS = Registry('data sampler', parent=MMENGINE_DATA_SAMPLERS)
TRANSFORMS = Registry('transform', parent=MMENGINE_TRANSFORMS)
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', parent=MMENGINE_MODELS)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper', parent=MMENGINE_MODEL_WRAPPERS)
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry(
'weight initializer', parent=MMENGINE_WEIGHT_INITIALIZERS)
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer', parent=MMENGINE_OPTIMIZERS)
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry(
'optimizer constructor', parent=MMENGINE_OPTIMIZER_CONSTRUCTORS)
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', parent=MMENGINE_PARAM_SCHEDULERS)
# manage all kinds of metrics
METRICS = Registry('metric', parent=MMENGINE_METRICS)
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util', parent=MMENGINE_TASK_UTILS)
# manage visualizer
VISUALIZERS = Registry('visualizer', parent=MMENGINE_VISUALIZERS)
# manage visualizer backend
VISBACKENDS = Registry('vis_backend', parent=MMENGINE_VISBACKENDS)
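# Example usage (an illustrative sketch, not part of this module): components are
# registered into a node with the ``register_module`` decorator and instantiated
# from config dicts with ``build``, e.g.:
#
#   from torch import nn
#   from mmdet.registry import MODELS
#
#   @MODELS.register_module()
#   class TinyNeck(nn.Module):  # hypothetical component for illustration
#       ...
#
#   neck = MODELS.build(dict(type='TinyNeck'))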
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMDetection provides 17 registry nodes to support using modules across
projects. Each node is a child of the root registry in MMEngine.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS
from mmengine.registry import DATASETS as MMENGINE_DATASETS
from mmengine.registry import HOOKS as MMENGINE_HOOKS
from mmengine.registry import LOOPS as MMENGINE_LOOPS
from mmengine.registry import METRICS as MMENGINE_METRICS
from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS
from mmengine.registry import MODELS as MMENGINE_MODELS
from mmengine.registry import \
OPTIMIZER_CONSTRUCTORS as MMENGINE_OPTIMIZER_CONSTRUCTORS
from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS
from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS
from mmengine.registry import \
RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS
from mmengine.registry import RUNNERS as MMENGINE_RUNNERS
from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS
from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS
from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS
from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS
from mmengine.registry import \
WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS
from mmengine.registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', parent=MMENGINE_RUNNERS)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry(
'runner constructor', parent=MMENGINE_RUNNER_CONSTRUCTORS)
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop', parent=MMENGINE_LOOPS)
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook', parent=MMENGINE_HOOKS)
# manage data-related modules
DATASETS = Registry('dataset', parent=MMENGINE_DATASETS)
DATA_SAMPLERS = Registry('data sampler', parent=MMENGINE_DATA_SAMPLERS)
TRANSFORMS = Registry('transform', parent=MMENGINE_TRANSFORMS)
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', parent=MMENGINE_MODELS)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper', parent=MMENGINE_MODEL_WRAPPERS)
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry(
'weight initializer', parent=MMENGINE_WEIGHT_INITIALIZERS)
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer', parent=MMENGINE_OPTIMIZERS)
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry(
'optimizer constructor', parent=MMENGINE_OPTIMIZER_CONSTRUCTORS)
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', parent=MMENGINE_PARAM_SCHEDULERS)
# manage all kinds of metrics
METRICS = Registry('metric', parent=MMENGINE_METRICS)
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util', parent=MMENGINE_TASK_UTILS)
# manage visualizer
VISUALIZERS = Registry('visualizer', parent=MMENGINE_VISUALIZERS)
# manage visualizer backend
VISBACKENDS = Registry('vis_backend', parent=MMENGINE_VISBACKENDS)
|
import asyncio
import copy
from typing import Any, List, Optional
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.gateways: List[BaseGateway] = []
for port, protocol in zip(self.ports, self.protocols):
gateway_cls = _get_gateway_class(protocol)
# ignore metrics_registry since it is not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ['metrics_registry']
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
gateway_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
gateway = gateway_cls(**gateway_kwargs)
self.gateways.append(gateway)
async def setup_server(self):
"""
        Set up the servers of all composed gateways
"""
tasks = []
for gateway in self.gateways:
tasks.append(asyncio.create_task(gateway.setup_server()))
await asyncio.gather(*tasks)
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
shutdown_tasks = []
for gateway in self.gateways:
shutdown_tasks.append(asyncio.create_task(gateway.shutdown()))
await asyncio.gather(*shutdown_tasks)
async def run_server(self):
"""Run GRPC server forever"""
run_server_tasks = []
for gateway in self.gateways:
run_server_tasks.append(asyncio.create_task(gateway.run_server()))
await asyncio.gather(*run_server_tasks)
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
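        # Pre-seeding deepcopy's memo with ``id(attr) -> None`` marks the attribute
        # as "already copied", so every reference to it in the copied object graph
        # is replaced by None instead of being deep-copied.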
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(gateway.server, 'should_exit', True) for gateway in self.gateways
]
return all(should_exit_values)
|
import copy
from typing import Any, List, Optional
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.gateways: List[BaseGateway] = []
for port, protocol in zip(self.ports, self.protocols):
gateway_cls = _get_gateway_class(protocol)
# ignore metrics_registry since it is not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ['metrics_registry']
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
gateway_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
gateway = gateway_cls(**gateway_kwargs)
self.gateways.append(gateway)
async def setup_server(self):
"""
        Set up the servers of all composed gateways
"""
for gateway in self.gateways:
await gateway.setup_server()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
for gateway in self.gateways:
await gateway.shutdown()
async def run_server(self):
"""Run GRPC server forever"""
for gateway in self.gateways:
await gateway.run_server()
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(gateway.server, 'should_exit', True) for gateway in self.gateways
]
return all(should_exit_values)
|
"""
JSONalyze Query Engine.
WARNING: This tool executes a SQL prompt generated by the LLM with SQLite and
may lead to arbitrary file creation on the machine running this tool.
This tool is not recommended for use in a production setting, and would
require heavy sandboxing or virtual machines.
DEPRECATED: Use `JSONalyzeQueryEngine` from `llama-index-experimental` instead.
"""
from typing import Any
class JSONalyzeQueryEngine:
"""
JSONalyze query engine.
DEPRECATED: Use `JSONalyzeQueryEngine` from `llama-index-experimental` instead.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"JSONalyzeQueryEngine has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine import JSONalyzeQueryEngine`\n"
"Note that the JSONalyzeQueryEngine allows for arbitrary file creation, \n"
"and should be used in a secure environment."
)
|
"""JSONalyze Query Engine.
WARNING: This tool executes a SQL prompt generated by the LLM with SQLite and
may lead to arbitrary file creation on the machine running this tool.
This tool is not recommended for use in a production setting, and would
require heavy sandboxing or virtual machines.
DEPRECATED: Use `JSONalyzeQueryEngine` from `llama-index-experimental` instead.
"""
from typing import Any
class JSONalyzeQueryEngine:
"""JSONalyze query engine.
DEPRECATED: Use `JSONalyzeQueryEngine` from `llama-index-experimental` instead.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"JSONalyzeQueryEngine has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine import JSONalyzeQueryEngine`\n"
"Note that the JSONalyzeQueryEngine allows for arbitrary file creation, \n"
"and should be used in a secure environment."
)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
# The cropped images are padded into squares during training,
# but may be less than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, SGD performs better than
# the Adam optimizer used in the original source code, so we keep the default
# SGD settings; with Adam and lr=5e-4 the mAP is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
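# Illustrative note (an assumption about the linear scaling rule, not part of the
# original config): when auto_scale_lr is enabled, MMEngine scales the learning
# rate by actual_batch_size / base_batch_size, e.g. a total batch size of 64
# would scale the LR by 64 / 128 = 0.5.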
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
# The cropped images are padded into squares during training,
# but may be less than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, SGD performs better than
# the Adam optimizer used in the original source code, so we keep the default
# SGD settings; with Adam and lr=5e-4 the mAP is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import AzureMLOnlineEndpoint
from langchain_community.llms.azureml_endpoint import (
AzureMLEndpointClient,
ContentFormatterBase,
CustomOpenAIContentFormatter,
DollyContentFormatter,
GPT2ContentFormatter,
HFContentFormatter,
OSSContentFormatter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AzureMLEndpointClient": "langchain_community.llms.azureml_endpoint",
"ContentFormatterBase": "langchain_community.llms.azureml_endpoint",
"GPT2ContentFormatter": "langchain_community.llms.azureml_endpoint",
"OSSContentFormatter": "langchain_community.llms.azureml_endpoint",
"HFContentFormatter": "langchain_community.llms.azureml_endpoint",
"DollyContentFormatter": "langchain_community.llms.azureml_endpoint",
"CustomOpenAIContentFormatter": "langchain_community.llms.azureml_endpoint",
"AzureMLOnlineEndpoint": "langchain_community.llms",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
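# Illustrative effect (a sketch; assumes this file is importable as
# ``langchain.llms.azureml_endpoint``): attribute access on this module goes
# through ``__getattr__``, which emits a deprecation notice and returns the class
# re-exported from ``langchain_community``, e.g.:
#
#   from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint  # deprecated path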
__all__ = [
"AzureMLEndpointClient",
"AzureMLOnlineEndpoint",
"ContentFormatterBase",
"CustomOpenAIContentFormatter",
"DollyContentFormatter",
"GPT2ContentFormatter",
"HFContentFormatter",
"OSSContentFormatter",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import AzureMLOnlineEndpoint
from langchain_community.llms.azureml_endpoint import (
AzureMLEndpointClient,
ContentFormatterBase,
CustomOpenAIContentFormatter,
DollyContentFormatter,
GPT2ContentFormatter,
HFContentFormatter,
OSSContentFormatter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AzureMLEndpointClient": "langchain_community.llms.azureml_endpoint",
"ContentFormatterBase": "langchain_community.llms.azureml_endpoint",
"GPT2ContentFormatter": "langchain_community.llms.azureml_endpoint",
"OSSContentFormatter": "langchain_community.llms.azureml_endpoint",
"HFContentFormatter": "langchain_community.llms.azureml_endpoint",
"DollyContentFormatter": "langchain_community.llms.azureml_endpoint",
"CustomOpenAIContentFormatter": "langchain_community.llms.azureml_endpoint",
"AzureMLOnlineEndpoint": "langchain_community.llms",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AzureMLEndpointClient",
"ContentFormatterBase",
"GPT2ContentFormatter",
"OSSContentFormatter",
"HFContentFormatter",
"DollyContentFormatter",
"CustomOpenAIContentFormatter",
"AzureMLOnlineEndpoint",
]
|
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
# optimizer
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))
optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))
optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
# TODO: Waiting to fix multiple call error bug
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(
config_file, device=device, cfg_options=cfg_options)
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device=device)
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
rng = np.random.RandomState(0)
img1 = rng.randint(0, 255, (32, 32, 3), dtype=np.uint8)
img2 = rng.randint(0, 255, (32, 32, 3), dtype=np.uint8)
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(config_file, device=device)
result = inference_detector(model, img1)
assert isinstance(result, DetDataSample)
result = inference_detector(model, [img1, img2])
assert isinstance(result, list) and len(result) == 2
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
# TODO: Waiting to fix multiple call error bug
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(
config_file, device=device, cfg_options=cfg_options)
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device=device)
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
rng = np.random.RandomState(0)
img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(config_file, device=device)
result = inference_detector(model, img1)
assert isinstance(result, DetDataSample)
result = inference_detector(model, [img1, img2])
assert isinstance(result, list) and len(result) == 2
|
# coding: utf-8
"""Helper script for checking versions in the dynamic symbol table.
This script checks that the LightGBM library is linked to the appropriate symbol versions.
Linking to newer symbol versions at compile time is problematic because it could result
in built artifacts being unusable on older platforms.
Version history for these symbols can be found at the following:
* GLIBC: https://sourceware.org/glibc/wiki/Glibc%20Timeline
* GLIBCXX: https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html
* OMP/GOMP: https://github.com/gcc-mirror/gcc/blob/master/libgomp/libgomp.map
"""
import re
import sys
from pathlib import Path
def check_dependencies(objdump_string: str) -> None:
"""Check the dynamic symbol versions.
Parameters
----------
objdump_string : str
The dynamic symbol table entries of the file (result of `objdump -T` command).
"""
GLIBC_version = re.compile(r"0{16}[ \(\t]+GLIBC_(\d{1,2})[.](\d{1,3})[.]?\d{,3}[ \)\t]+")
versions = GLIBC_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
error_msg = f"found unexpected GLIBC version: '{major}.{minor}'"
assert int(major) <= 2, error_msg
assert int(minor) <= 28, error_msg
GLIBCXX_version = re.compile(r"0{16}[ \(\t]+GLIBCXX_(\d{1,2})[.](\d{1,2})[.]?(\d{,3})[ \)\t]+")
versions = GLIBCXX_version.findall(objdump_string)
assert len(versions) > 1
for major, minor, patch in versions:
error_msg = f"found unexpected GLIBCXX version: '{major}.{minor}.{patch}'"
assert int(major) == 3, error_msg
assert int(minor) == 4, error_msg
assert patch == "" or int(patch) <= 22, error_msg
GOMP_version = re.compile(r"0{16}[ \(\t]+G?OMP_(\d{1,2})[.](\d{1,2})[.]?\d{,3}[ \)\t]+")
versions = GOMP_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
error_msg = f"found unexpected OMP/GOMP version: '{major}.{minor}'"
assert int(major) <= 4, error_msg
assert int(minor) <= 5, error_msg
if __name__ == "__main__":
check_dependencies(Path(sys.argv[1]).read_text(encoding="utf-8"))
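# Example invocation (a sketch; the library and script file names are assumptions):
#   objdump -T lib_lightgbm.so > objdump.log
#   python check_dynamic_dependencies.py objdump.log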
|
# coding: utf-8
"""Helper script for checking versions in the dynamic symbol table.
This script checks that the LightGBM library is linked to the appropriate symbol versions.
Linking to newer symbol versions at compile time is problematic because it could result
in built artifacts being unusable on older platforms.
Version history for these symbols can be found at the following:
* GLIBC: https://sourceware.org/glibc/wiki/Glibc%20Timeline
* GLIBCXX: https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html
* OMP/GOMP: https://github.com/gcc-mirror/gcc/blob/master/libgomp/libgomp.map
"""
import re
import sys
from pathlib import Path
def check_dependencies(objdump_string: str) -> None:
"""Check the dynamic symbol versions.
Parameters
----------
objdump_string : str
The dynamic symbol table entries of the file (result of `objdump -T` command).
"""
GLIBC_version = re.compile(r"0{16}[ \(\t]+GLIBC_(\d{1,2})[.](\d{1,3})[.]?\d{,3}[ \)\t]+")
versions = GLIBC_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
error_msg = f"found unexpected GLIBC version: '{major}.{minor}'"
assert int(major) <= 2, error_msg
assert int(minor) <= 28, error_msg
GLIBCXX_version = re.compile(r"0{16}[ \(\t]+GLIBCXX_(\d{1,2})[.](\d{1,2})[.]?(\d{,3})[ \)\t]+")
versions = GLIBCXX_version.findall(objdump_string)
assert len(versions) > 1
for major, minor, patch in versions:
error_msg = f"found unexpected GLIBCXX version: '{major}.{minor}.{patch}'"
assert int(major) == 3, error_msg
assert int(minor) == 4, error_msg
assert patch == "" or int(patch) <= 22, error_msg
GOMP_version = re.compile(r"0{16}[ \(\t]+G?OMP_(\d{1,2})[.](\d{1,2})[.]?\d{,3}[ \)\t]+")
versions = GOMP_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
error_msg = f"found unexpected OMP/GOMP version: '{major}.{minor}'"
assert int(major) <= 4, error_msg
assert int(minor) <= 5, error_msg
if __name__ == "__main__":
check_dependencies(Path(sys.argv[1]).read_text(encoding="utf-8"))
|
import logging
from colorama import Fore, Style
from .utils import remove_color_codes
class FancyConsoleFormatter(logging.Formatter):
"""
A custom logging formatter designed for console output.
This formatter enhances the standard logging output with color coding. The color
coding is based on the level of the log message, making it easier to distinguish
between different types of messages in the console output.
The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
"""
# level -> (level & text color, title color)
LEVEL_COLOR_MAP = {
logging.DEBUG: Fore.LIGHTBLACK_EX,
logging.INFO: Fore.BLUE,
logging.WARNING: Fore.YELLOW,
logging.ERROR: Fore.RED,
logging.CRITICAL: Fore.RED + Style.BRIGHT,
}
def format(self, record: logging.LogRecord) -> str:
# Make sure `msg` is a string
if not hasattr(record, "msg"):
record.msg = ""
elif type(record.msg) is not str:
record.msg = str(record.msg)
# Determine default color based on error level
level_color = ""
if record.levelno in self.LEVEL_COLOR_MAP:
level_color = self.LEVEL_COLOR_MAP[record.levelno]
record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"
# Determine color for message
color = getattr(record, "color", level_color)
color_is_specified = hasattr(record, "color")
# Don't color INFO messages unless the color is explicitly specified.
if color and (record.levelno != logging.INFO or color_is_specified):
record.msg = f"{color}{record.msg}{Style.RESET_ALL}"
return super().format(record)
class AGPTFormatter(FancyConsoleFormatter):
def __init__(self, *args, no_color: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self.no_color = no_color
def format(self, record: logging.LogRecord) -> str:
# Make sure `msg` is a string
if not hasattr(record, "msg"):
record.msg = ""
elif type(record.msg) is not str:
record.msg = str(record.msg)
# Strip color from the message to prevent color spoofing
if record.msg and not getattr(record, "preserve_color", False):
record.msg = remove_color_codes(record.msg)
# Determine color for title
title = getattr(record, "title", "")
title_color = getattr(record, "title_color", "") or self.LEVEL_COLOR_MAP.get(
record.levelno, ""
)
if title and title_color:
title = f"{title_color + Style.BRIGHT}{title}{Style.RESET_ALL}"
# Make sure record.title is set, and padded with a space if not empty
record.title = f"{title} " if title else ""
if self.no_color:
return remove_color_codes(super().format(record))
else:
return super().format(record)
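# Example wiring (an illustrative sketch, not part of this module):
#
#   import logging
#   import sys
#
#   handler = logging.StreamHandler(sys.stdout)
#   handler.setFormatter(AGPTFormatter("%(levelname)s %(title)s%(message)s"))
#   logger = logging.getLogger("demo")
#   logger.addHandler(handler)
#   # "title" and "title_color" are read via getattr() in AGPTFormatter.format()
#   logger.warning("disk almost full", extra={"title": "CHECK", "title_color": Fore.CYAN})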
|
import logging
from colorama import Fore, Style
from google.cloud.logging_v2.handlers import CloudLoggingFilter, StructuredLogHandler
from .utils import remove_color_codes
class FancyConsoleFormatter(logging.Formatter):
"""
A custom logging formatter designed for console output.
This formatter enhances the standard logging output with color coding. The color
coding is based on the level of the log message, making it easier to distinguish
between different types of messages in the console output.
The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
"""
# level -> (level & text color, title color)
LEVEL_COLOR_MAP = {
logging.DEBUG: Fore.LIGHTBLACK_EX,
logging.INFO: Fore.BLUE,
logging.WARNING: Fore.YELLOW,
logging.ERROR: Fore.RED,
logging.CRITICAL: Fore.RED + Style.BRIGHT,
}
def format(self, record: logging.LogRecord) -> str:
# Make sure `msg` is a string
if not hasattr(record, "msg"):
record.msg = ""
elif type(record.msg) is not str:
record.msg = str(record.msg)
# Determine default color based on error level
level_color = ""
if record.levelno in self.LEVEL_COLOR_MAP:
level_color = self.LEVEL_COLOR_MAP[record.levelno]
record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"
# Determine color for message
color = getattr(record, "color", level_color)
color_is_specified = hasattr(record, "color")
# Don't color INFO messages unless the color is explicitly specified.
if color and (record.levelno != logging.INFO or color_is_specified):
record.msg = f"{color}{record.msg}{Style.RESET_ALL}"
return super().format(record)
class AGPTFormatter(FancyConsoleFormatter):
def __init__(self, *args, no_color: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self.no_color = no_color
def format(self, record: logging.LogRecord) -> str:
# Make sure `msg` is a string
if not hasattr(record, "msg"):
record.msg = ""
elif type(record.msg) is not str:
record.msg = str(record.msg)
# Strip color from the message to prevent color spoofing
if record.msg and not getattr(record, "preserve_color", False):
record.msg = remove_color_codes(record.msg)
# Determine color for title
title = getattr(record, "title", "")
title_color = getattr(record, "title_color", "") or self.LEVEL_COLOR_MAP.get(
record.levelno, ""
)
if title and title_color:
title = f"{title_color + Style.BRIGHT}{title}{Style.RESET_ALL}"
# Make sure record.title is set, and padded with a space if not empty
record.title = f"{title} " if title else ""
if self.no_color:
return remove_color_codes(super().format(record))
else:
return super().format(record)
class StructuredLoggingFormatter(StructuredLogHandler, logging.Formatter):
def __init__(self):
# Set up CloudLoggingFilter to add diagnostic info to the log records
self.cloud_logging_filter = CloudLoggingFilter()
# Init StructuredLogHandler
super().__init__()
def format(self, record: logging.LogRecord) -> str:
self.cloud_logging_filter.filter(record)
return super().format(record)
|
from keras.src.callbacks.backup_and_restore import BackupAndRestore
from keras.src.callbacks.callback import Callback
from keras.src.callbacks.callback_list import CallbackList
from keras.src.callbacks.csv_logger import CSVLogger
from keras.src.callbacks.early_stopping import EarlyStopping
from keras.src.callbacks.history import History
from keras.src.callbacks.lambda_callback import LambdaCallback
from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler
from keras.src.callbacks.model_checkpoint import ModelCheckpoint
from keras.src.callbacks.monitor_callback import MonitorCallback
from keras.src.callbacks.progbar_logger import ProgbarLogger
from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
from keras.src.callbacks.remote_monitor import RemoteMonitor
from keras.src.callbacks.swap_ema_weights import SwapEMAWeights
from keras.src.callbacks.tensorboard import TensorBoard
from keras.src.callbacks.terminate_on_nan import TerminateOnNaN
|
from keras.src.callbacks.backup_and_restore import BackupAndRestore
from keras.src.callbacks.callback import Callback
from keras.src.callbacks.callback_list import CallbackList
from keras.src.callbacks.csv_logger import CSVLogger
from keras.src.callbacks.early_stopping import EarlyStopping
from keras.src.callbacks.history import History
from keras.src.callbacks.lambda_callback import LambdaCallback
from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler
from keras.src.callbacks.model_checkpoint import ModelCheckpoint
from keras.src.callbacks.progbar_logger import ProgbarLogger
from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
from keras.src.callbacks.remote_monitor import RemoteMonitor
from keras.src.callbacks.swap_ema_weights import SwapEMAWeights
from keras.src.callbacks.tensorboard import TensorBoard
from keras.src.callbacks.terminate_on_nan import TerminateOnNaN
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import Hook
class TestHook:
def test_before_run(self):
hook = Hook()
runner = Mock()
hook.before_run(runner)
def test_after_run(self):
hook = Hook()
runner = Mock()
hook.after_run(runner)
def test_before_epoch(self):
hook = Hook()
runner = Mock()
hook._before_epoch(runner)
def test_after_epoch(self):
hook = Hook()
runner = Mock()
hook._after_epoch(runner)
def test_before_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
hook._before_iter(runner, data_batch)
def test_after_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
outputs = {}
hook._after_iter(runner, data_batch, outputs)
def test_before_save_checkpoint(self):
hook = Hook()
runner = Mock()
checkpoint = {}
hook.before_save_checkpoint(runner, checkpoint)
def test_after_load_checkpoint(self):
hook = Hook()
runner = Mock()
checkpoint = {}
hook.after_load_checkpoint(runner, checkpoint)
def test_before_train_epoch(self):
hook = Hook()
runner = Mock()
hook.before_train_epoch(runner)
def test_before_val_epoch(self):
hook = Hook()
runner = Mock()
hook.before_val_epoch(runner)
def test_before_test_epoch(self):
hook = Hook()
runner = Mock()
hook.before_test_epoch(runner)
def test_after_train_epoch(self):
hook = Hook()
runner = Mock()
hook.after_train_epoch(runner)
def test_after_val_epoch(self):
hook = Hook()
runner = Mock()
hook.after_val_epoch(runner, {})
def test_after_test_epoch(self):
hook = Hook()
runner = Mock()
hook.after_test_epoch(runner, {})
def test_before_train_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
hook.before_train_iter(runner, data_batch)
def test_before_val_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
hook.before_val_iter(runner, data_batch)
def test_before_test_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
hook.before_test_iter(runner, data_batch)
def test_after_train_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
outputs = {}
hook.after_train_iter(runner, data_batch, outputs)
def test_after_val_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
outputs = {}
hook.after_val_iter(runner, data_batch, outputs)
def test_after_test_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
outputs = {}
hook.after_test_iter(runner, data_batch, outputs)
def test_every_n_epochs(self):
hook = Hook()
runner = Mock()
for i in range(100):
runner.epoch = i
return_val = hook.every_n_epochs(runner, 3)
if (i + 1) % 3 == 0:
assert return_val
else:
assert not return_val
def test_every_n_inner_iters(self):
hook = Hook()
for i in range(100):
return_val = hook.every_n_inner_iters(i, 3)
if (i + 1) % 3 == 0:
assert return_val
else:
assert not return_val
def test_every_n_train_iters(self):
hook = Hook()
runner = Mock()
for i in range(100):
runner.iter = i
return_val = hook.every_n_train_iters(runner, 3)
if (i + 1) % 3 == 0:
assert return_val
else:
assert not return_val
def test_end_of_epoch(self):
hook = Hook()
# last inner iter
batch_idx = 1
dataloader = Mock()
dataloader.__len__ = Mock(return_value=2)
return_val = hook.end_of_epoch(dataloader, batch_idx)
assert return_val
# not the last inner iter
batch_idx = 0
return_val = hook.end_of_epoch(dataloader, batch_idx)
assert not return_val
def test_is_last_train_epoch(self):
hook = Hook()
runner = Mock()
# last epoch
runner.epoch = 1
runner.max_epochs = 2
return_val = hook.is_last_train_epoch(runner)
assert return_val
# not the last epoch
runner.max_epochs = 0
return_val = hook.is_last_train_epoch(runner)
assert not return_val
def test_is_last_train_iter(self):
hook = Hook()
runner = Mock()
# last iter
runner.iter = 1
runner.max_iters = 2
return_val = hook.is_last_train_iter(runner)
assert return_val
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import Hook
class TestHook:
def test_before_run(self):
hook = Hook()
runner = Mock()
hook.before_run(runner)
def test_after_run(self):
hook = Hook()
runner = Mock()
hook.after_run(runner)
def test_before_epoch(self):
hook = Hook()
runner = Mock()
hook._before_epoch(runner)
def test_after_epoch(self):
hook = Hook()
runner = Mock()
hook._after_epoch(runner)
def test_before_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
hook._before_iter(runner, data_batch)
def test_after_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
outputs = {}
hook._after_iter(runner, data_batch, outputs)
def test_before_save_checkpoint(self):
hook = Hook()
runner = Mock()
checkpoint = {}
hook.before_save_checkpoint(runner, checkpoint)
def test_after_load_checkpoint(self):
hook = Hook()
runner = Mock()
checkpoint = {}
hook.after_load_checkpoint(runner, checkpoint)
def test_before_train_epoch(self):
hook = Hook()
runner = Mock()
hook.before_train_epoch(runner)
def test_before_val_epoch(self):
hook = Hook()
runner = Mock()
hook.before_val_epoch(runner)
def test_before_test_epoch(self):
hook = Hook()
runner = Mock()
hook.before_test_epoch(runner)
def test_after_train_epoch(self):
hook = Hook()
runner = Mock()
hook.after_train_epoch(runner)
def test_after_val_epoch(self):
hook = Hook()
runner = Mock()
hook.after_val_epoch(runner)
def test_after_test_epoch(self):
hook = Hook()
runner = Mock()
hook.after_test_epoch(runner)
def test_before_train_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
hook.before_train_iter(runner, data_batch)
def test_before_val_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
hook.before_val_iter(runner, data_batch)
def test_before_test_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
hook.before_test_iter(runner, data_batch)
def test_after_train_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
outputs = {}
hook.after_train_iter(runner, data_batch, outputs)
def test_after_val_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
outputs = {}
hook.after_val_iter(runner, data_batch, outputs)
def test_after_test_iter(self):
hook = Hook()
runner = Mock()
data_batch = {}
outputs = {}
hook.after_test_iter(runner, data_batch, outputs)
def test_every_n_epochs(self):
hook = Hook()
runner = Mock()
for i in range(100):
runner.epoch = i
return_val = hook.every_n_epochs(runner, 3)
if (i + 1) % 3 == 0:
assert return_val
else:
assert not return_val
def test_every_n_inner_iters(self):
hook = Hook()
for i in range(100):
return_val = hook.every_n_inner_iters(i, 3)
if (i + 1) % 3 == 0:
assert return_val
else:
assert not return_val
def test_every_n_train_iters(self):
hook = Hook()
runner = Mock()
for i in range(100):
runner.iter = i
return_val = hook.every_n_train_iters(runner, 3)
if (i + 1) % 3 == 0:
assert return_val
else:
assert not return_val
def test_end_of_epoch(self):
hook = Hook()
# last inner iter
batch_idx = 1
dataloader = Mock()
dataloader.__len__ = Mock(return_value=2)
return_val = hook.end_of_epoch(dataloader, batch_idx)
assert return_val
# not the last inner iter
batch_idx = 0
return_val = hook.end_of_epoch(dataloader, batch_idx)
assert not return_val
def test_is_last_train_epoch(self):
hook = Hook()
runner = Mock()
# last epoch
runner.epoch = 1
runner.max_epochs = 2
return_val = hook.is_last_train_epoch(runner)
assert return_val
# not the last epoch
runner.max_epochs = 0
return_val = hook.is_last_train_epoch(runner)
assert not return_val
def test_is_last_train_iter(self):
hook = Hook()
runner = Mock()
# last iter
runner.iter = 1
runner.max_iters = 2
return_val = hook.is_last_train_iter(runner)
assert return_val
|
"""
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of a forest of trees to evaluate the impurity
based importance of the pixels in an image classification task on the faces
dataset. The hotter the pixel, the more important it is.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Loading the data and model fitting
# ----------------------------------
# First, we load the olivetti faces dataset and limit the dataset to contain
# only the first five classes. Then we train a random forest on the dataset
# and evaluate the impurity-based feature importance. One drawback of this
# method is that it cannot be evaluated on a separate test set. For this
# example, we are interested in representing the information learned from
# the full dataset. Also, we'll set the number of cores to use for the tasks.
from sklearn.datasets import fetch_olivetti_faces
# %%
# We select the number of cores to use to perform parallel fitting of
# the forest model. `-1` means use all available cores.
n_jobs = -1
# %%
# Load the faces dataset
data = fetch_olivetti_faces()
X, y = data.data, data.target
# %%
# Limit the dataset to 5 classes.
mask = y < 5
X = X[mask]
y = y[mask]
# %%
# A random forest classifier will be fitted to compute the feature importances.
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=750, n_jobs=n_jobs, random_state=42)
forest.fit(X, y)
# %%
# Feature importance based on mean decrease in impurity (MDI)
# -----------------------------------------------------------
# Feature importances are provided by the fitted attribute
# `feature_importances_` and they are computed as the mean and standard
# deviation of accumulation of the impurity decrease within each tree.
#
# .. warning::
# Impurity-based feature importances can be misleading for **high
# cardinality** features (many unique values). See
# :ref:`permutation_importance` as an alternative.
import time
import matplotlib.pyplot as plt
start_time = time.time()
img_shape = data.images[0].shape
importances = forest.feature_importances_
elapsed_time = time.time() - start_time
print(f"Elapsed time to compute the importances: {elapsed_time:.3f} seconds")
imp_reshaped = importances.reshape(img_shape)
plt.matshow(imp_reshaped, cmap=plt.cm.hot)
plt.title("Pixel importances using impurity values")
plt.colorbar()
plt.show()
# %%
# Can you still recognize a face?
# %%
# The limitations of MDI are not a problem for this dataset because:
#
# 1. All features are (ordered) numeric and will thus not suffer from the
#    cardinality bias.
# 2. We are only interested in representing the knowledge the forest acquired
#    on the training set.
#
# If these two conditions are not met, it is recommended to use
# :func:`~sklearn.inspection.permutation_importance` instead, as sketched below.
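# %%
# A minimal sketch (not part of the original example) of the permutation-based
# alternative mentioned above, reusing the already fitted ``forest`` and the
# training data:
from sklearn.inspection import permutation_importance

result = permutation_importance(
    forest, X, y, n_repeats=5, random_state=42, n_jobs=n_jobs
)
perm_reshaped = result.importances_mean.reshape(img_shape)
plt.matshow(perm_reshaped, cmap=plt.cm.hot)
plt.title("Pixel importances using permutation on the training set")
plt.colorbar()
plt.show()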
|
"""
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of a forest of trees to evaluate the impurity
based importance of the pixels in an image classification task on the faces
dataset. The hotter the pixel, the more important it is.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Loading the data and model fitting
# ----------------------------------
# First, we load the olivetti faces dataset and limit the dataset to contain
# only the first five classes. Then we train a random forest on the dataset
# and evaluate the impurity-based feature importance. One drawback of this
# method is that it cannot be evaluated on a separate test set. For this
# example, we are interested in representing the information learned from
# the full dataset. Also, we'll set the number of cores to use for the tasks.
from sklearn.datasets import fetch_olivetti_faces
# %%
# We select the number of cores to use to perform parallel fitting of
# the forest model. `-1` means use all available cores.
n_jobs = -1
# %%
# Load the faces dataset
data = fetch_olivetti_faces()
X, y = data.data, data.target
# %%
# Limit the dataset to 5 classes.
mask = y < 5
X = X[mask]
y = y[mask]
# %%
# A random forest classifier will be fitted to compute the feature importances.
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=750, n_jobs=n_jobs, random_state=42)
forest.fit(X, y)
# %%
# Feature importance based on mean decrease in impurity (MDI)
# -----------------------------------------------------------
# Feature importances are provided by the fitted attribute
# `feature_importances_` and they are computed as the mean and standard
# deviation of accumulation of the impurity decrease within each tree.
#
# .. warning::
# Impurity-based feature importances can be misleading for **high
# cardinality** features (many unique values). See
# :ref:`permutation_importance` as an alternative.
import time
import matplotlib.pyplot as plt
start_time = time.time()
img_shape = data.images[0].shape
importances = forest.feature_importances_
elapsed_time = time.time() - start_time
print(f"Elapsed time to compute the importances: {elapsed_time:.3f} seconds")
imp_reshaped = importances.reshape(img_shape)
plt.matshow(imp_reshaped, cmap=plt.cm.hot)
plt.title("Pixel importances using impurity values")
plt.colorbar()
plt.show()
# %%
# Can you still recognize a face?
# %%
# The limitations of MDI are not a problem for this dataset because:
#
# 1. All features are (ordered) numeric and will thus not suffer from the
#    cardinality bias.
# 2. We are only interested in representing the knowledge the forest acquired
#    on the training set.
#
# If these two conditions are not met, it is recommended to use
# :func:`~sklearn.inspection.permutation_importance` instead.
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseTripletLoss(model), document_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
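# Worked illustration of the margin formula in the docstring (toy numbers):
# with ||anchor - positive|| = 0.4, ||anchor - negative|| = 0.3 and margin = 5,
# loss = max(0.4 - 0.3 + 5, 0) = 5.1, so the negative still has to be pushed
# at least the margin further away from the anchor than the positive.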
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. Needs to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseTripletLoss(model), corpus_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
__all__ = ['VideoNdArray']
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(['VideoTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.video.video_tensorflow_tensor import ( # noqa: F401
VideoTensorFlowTensor,
)
__all__.extend(['VideoTensorFlowTensor'])
|
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
__all__ = ['VideoNdArray']
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(['VideoTorchTensor'])
|
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.load_balancer import LoadBalancingServer
__all__ = ['LoadBalancerGateway']
class LoadBalancerGateway(LoadBalancingServer, BaseGateway):
"""
:class:`LoadBalancerGateway`
"""
pass
|
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.load_balancer import LoadBalancingServer
__all__ = ['LoadBalancerGateway']
class LoadBalancerGateway(LoadBalancingServer, BaseGateway):
"""
:class:`LoadBalancerGateway`
"""
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
@parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init standard RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head.init_weights()
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])
def test_cascade_roi_head_loss(self, cfg_file):
"""Tests standard roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
# RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
# add important elements into proposal
init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()
for proposal in proposal_list:
proposal.features = init_proposal_features
proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,
s]]).repeat(100, 1)
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
# add important elements into proposal
init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()
for proposal in proposal_list:
proposal.features = init_proposal_features
proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,
s]]).repeat(100, 1)
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
@parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init standard RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head.init_weights()
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])
def test_cascade_roi_head_loss(self, cfg_file):
"""Tests standard roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
# RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
# add important elements into proposal
init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()
for proposal in proposal_list:
proposal.features = init_proposal_features
proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,
s]]).repeat(100, 1)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
# add important elements into proposal
init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()
for proposal in proposal_list:
proposal.features = init_proposal_features
proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,
s]]).repeat(100, 1)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
import torch
from torch import nn, Tensor
from typing import Any, Iterable, Dict
from sentence_transformers.util import fullname
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
max_epochs = 12
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}),
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(warmup_iters=1000, step=[8, 11])
runner = dict(max_epochs=12)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
data['inputs'] = [data['inputs']]
data['data_samples'] = [data['data_samples']]
data_sample = model.data_preprocessor(data, False)['data_samples']
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1)).cuda()
data = {'inputs': [frame_resize], 'data_samples': [data_sample]}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then passed to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step(data)[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
_, data_sample = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
data = {'inputs': frame_resize, 'data_sample': data_sample}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then passed to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone: Union[ConfigDict, dict],
rpn_head: Union[ConfigDict, dict],
roi_head: Union[ConfigDict, dict],
train_cfg: Union[ConfigDict, dict],
test_cfg: Union[ConfigDict, dict],
neck: Optional[Union[ConfigDict, dict]] = None,
pretrained: Optional[str] = None,
preprocess_cfg: Optional[Union[ConfigDict, dict]] = None,
init_cfg: Optional[Union[ConfigDict, dict]] = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_scp_270k_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_scp_270k_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label in the range 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import csv
import logging
import math
import os
from datetime import datetime
from zipfile import ZipFile
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
with ZipFile(zip_save_path, "r") as zip:
zip.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label in the range 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import csv
import logging
import math
import os
from datetime import datetime
from zipfile import ZipFile
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
with ZipFile(zip_save_path, "r") as zip:
zip.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
from typing import Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
T = TypeVar('T', bound='ID')
class ID(str, AbstractType):
"""
Represent a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> NodeProto:
"""Convert an ID into a NodeProto message. This function should
be called when self is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(id=self)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
Read an ID string from a proto msg
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
|
from typing import Optional, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
T = TypeVar('T', bound='ID')
class ID(str, BaseNode):
"""
Represent a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: Optional['ModelField'] = None,
config: Optional['BaseConfig'] = None,
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> NodeProto:
"""Convert an ID into a NodeProto message. This function should
be called when self is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(id=self)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
Read an ID string from a proto msg
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
|
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
from typing import List
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
"""Dataset for Cityscapes."""
METAINFO = {
'classes': ('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'),
'palette': [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),
(0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]
}
def filter_data(self) -> List[dict]:
"""Filter annotations according to filter_cfg.
Returns:
List[dict]: Filtered results.
"""
if self.test_mode:
return self.data_list
if self.filter_cfg is None:
return self.data_list
filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)
min_size = self.filter_cfg.get('min_size', 0)
# obtain images that contain annotation
ids_with_ann = set(data_info['img_id'] for data_info in self.data_list)
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_data_infos = []
for i, data_info in enumerate(self.data_list):
img_id = data_info['img_id']
width = data_info['width']
height = data_info['height']
all_is_crowd = all([
instance['ignore_flag'] == 1
for instance in data_info['instances']
])
if filter_empty_gt and (img_id not in ids_in_cat or all_is_crowd):
continue
if min(width, height) >= min_size:
valid_data_infos.append(data_info)
return valid_data_infos
|
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
from typing import List
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
"""Dataset for Cityscapes."""
METAINFO = {
'CLASSES': ('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'),
'PALETTE': [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),
(0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]
}
def filter_data(self) -> List[dict]:
"""Filter annotations according to filter_cfg.
Returns:
List[dict]: Filtered results.
"""
if self.test_mode:
return self.data_list
if self.filter_cfg is None:
return self.data_list
filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)
min_size = self.filter_cfg.get('min_size', 0)
# obtain images that contain annotation
ids_with_ann = set(data_info['img_id'] for data_info in self.data_list)
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_data_infos = []
for i, data_info in enumerate(self.data_list):
img_id = data_info['img_id']
width = data_info['width']
height = data_info['height']
all_is_crowd = all([
instance['ignore_flag'] == 1
for instance in data_info['instances']
])
if filter_empty_gt and (img_id not in ids_in_cat or all_is_crowd):
continue
if min(width, height) >= min_size:
valid_data_infos.append(data_info)
return valid_data_infos
|
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.333335876464844, 'sparsity_ratio': 0.9981543366792325}
Average non-zero dimensions: 56.33
Sparsity percentage: 99.82%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# The result is, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.66666793823242, 'sparsity_ratio': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# The result is, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms-marco-dev-small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.46
MRR@10: 54.18
NDCG@10: 65.10
Model Sparsity Stats Query : Row Non-Zero Mean: 43.89658737182617, Row Sparsity Mean: 0.9985617995262146
Model Sparsity Stats Corpus : Row Non-Zero Mean: 128.37216186523438, Row Sparsity Mean: 0.9957940578460693
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms-marco-dev-small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6510
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms-marco-dev-small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.46
MRR@10: 54.18
NDCG@10: 65.10
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms-marco-dev-small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6510
|
import pytest
from docarray import DocumentArray
from jina import Client, Document, Executor, Flow, requests, types
from jina.excepts import BadServer
class SimplExecutor(Executor):
@requests
def add_text(self, docs, **kwargs):
docs[0].text = 'Hello World!'
def test_simple_docarray_return():
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(on='/index', inputs=[Document()])
assert docs[0].text == 'Hello World!'
def test_flatten_docarrays():
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(
on='/index',
inputs=[Document() for _ in range(100)],
request_size=10,
)
assert isinstance(docs, DocumentArray)
assert len(docs) == 100
assert docs[0].text == 'Hello World!'
def my_cb(resp):
return resp
@pytest.mark.parametrize('on_done', [None, my_cb])
@pytest.mark.parametrize('on_always', [None, my_cb])
@pytest.mark.parametrize('on_error', [None, my_cb])
def test_automatically_set_returnresults(on_done, on_always, on_error):
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(
on='/index',
inputs=[Document() for _ in range(100)],
request_size=10,
on_done=on_done,
on_always=on_always,
on_error=on_error,
)
if on_done is None and on_always is None:
assert isinstance(docs, DocumentArray)
assert len(docs) == 100
assert docs[0].text == 'Hello World!'
else:
assert docs is None
def test_empty_docarray():
f = Flow().add(uses=SimplExecutor)
with pytest.raises(BadServer):
with f:
docs = f.post(on='/')
def test_flow_client_defaults():
exposed_port = 12345
f = Flow(port=exposed_port).add(uses=SimplExecutor)
c = Client(port=exposed_port)
with f:
docs = f.post(on='/index', inputs=[Document()])
results = c.post(on='/index', inputs=[Document()])
assert isinstance(docs, DocumentArray)
assert docs[0].text == 'Hello World!'
assert isinstance(results, DocumentArray)
assert results[0].text == 'Hello World!'
|
from docarray import DocumentArray
from jina import Document, Executor, Flow, Client, requests, types
import pytest
class SimplExecutor(Executor):
@requests
def add_text(self, docs, **kwargs):
docs[0].text = 'Hello World!'
def test_simple_docarray_return():
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(on='/index', inputs=[Document()])
assert docs[0].text == 'Hello World!'
def test_flatten_docarrays():
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(
on='/index',
inputs=[Document() for _ in range(100)],
request_size=10,
)
assert isinstance(docs, DocumentArray)
assert len(docs) == 100
assert docs[0].text == 'Hello World!'
def my_cb(resp, e: Exception = None):
return resp
@pytest.mark.parametrize('on_done', [None, my_cb])
@pytest.mark.parametrize('on_always', [None, my_cb])
@pytest.mark.parametrize('on_error', [None, my_cb])
def test_automatically_set_returnresults(on_done, on_always, on_error):
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(
on='/index',
inputs=[Document() for _ in range(100)],
request_size=10,
on_done=on_done,
on_always=on_always,
on_error=on_error,
)
if on_done is None and on_always is None:
assert isinstance(docs, DocumentArray)
assert len(docs) == 100
assert docs[0].text == 'Hello World!'
else:
assert docs is None
def test_empty_docarray():
f = Flow().add(uses=SimplExecutor)
with f:
docs = f.post(on='/')
assert isinstance(docs, DocumentArray)
assert len(docs) == 0
def test_flow_client_defaults():
exposed_port = 12345
f = Flow(port=exposed_port).add(uses=SimplExecutor)
c = Client(port=exposed_port)
with f:
docs = f.post(on='/index', inputs=[Document()])
results = c.post(on='/index', inputs=[Document()])
assert isinstance(docs, DocumentArray)
assert docs[0].text == 'Hello World!'
assert isinstance(results, DocumentArray)
assert results[0].text == 'Hello World!'
|
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import IS_PYDANTIC_V1
if IS_PYDANTIC_V1:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
if issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
return None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
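# Minimal usage sketch (illustrative addition, not part of the module above): the
# parser accepts plain or markdown-fenced JSON, and get_format_instructions() only
# embeds a schema when a `pydantic_object` is provided.
if __name__ == "__main__":  # pragma: no cover - example only
    _parser = JsonOutputParser()
    print(_parser.parse('```json\n{"answer": 42}\n```'))  # -> {'answer': 42}
    print(_parser.get_format_instructions())  # -> "Return a JSON object."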
|
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import PYDANTIC_MAJOR_VERSION
if PYDANTIC_MAJOR_VERSION < 2:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
if issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
return None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import asyncio
import time
import pytest
from docarray import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator, _RequestsCounter
def slow_blocking_generator():
for i in range(2):
yield Document(id=str(i))
time.sleep(2)
@pytest.mark.asyncio
async def test_iter_requests():
iter = request_generator(exec_endpoint='/', data=slow_blocking_generator())
count = 0
num_reqs = 0
async def another_task():
nonlocal count
for _ in range(20):
await asyncio.sleep(0.2)
count += 1
task = asyncio.create_task(another_task())
async for _ in AsyncRequestsIterator(iter):
"""Using following code will block the event loop and count will be <5
for _ in iter:
...
"""
num_reqs += 1
task.cancel()
    # ideally count will be 20, but we only assert > 15 to avoid a flaky CI
assert count > 15
@pytest.mark.asyncio
async def test_iter_requests_with_prefetch():
max_amount_requests = _RequestsCounter()
counter = _RequestsCounter()
async def consume_requests():
while True:
await asyncio.sleep(0.01)
if counter.count > 0:
counter.count -= 1
async def req_iterator(max_amount_requests):
for i in range(1000):
await asyncio.sleep(0.001)
counter.count += 1
if counter.count > max_amount_requests.count:
max_amount_requests.count = counter.count
yield i
consume_task = asyncio.create_task(consume_requests())
async for _ in AsyncRequestsIterator(
req_iterator(max_amount_requests), counter, 10
):
pass
consume_task.cancel()
assert max_amount_requests.count == 10
|
import asyncio
import time
import pytest
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator, _RequestsCounter
def slow_blocking_generator():
for i in range(2):
yield Document(id=str(i))
time.sleep(2)
@pytest.mark.asyncio
async def test_iter_requests():
iter = request_generator(exec_endpoint='/', data=slow_blocking_generator())
count = 0
num_reqs = 0
async def another_task():
nonlocal count
for _ in range(20):
await asyncio.sleep(0.2)
count += 1
task = asyncio.create_task(another_task())
async for _ in AsyncRequestsIterator(iter):
"""Using following code will block the event loop and count will be <5
for _ in iter:
...
"""
num_reqs += 1
task.cancel()
    # ideally count will be 20, but we only assert > 15 to avoid a flaky CI
assert count > 15
@pytest.mark.asyncio
async def test_iter_requests_with_prefetch():
max_amount_requests = _RequestsCounter()
counter = _RequestsCounter()
async def consume_requests():
while True:
await asyncio.sleep(0.01)
if counter.count > 0:
counter.count -= 1
async def req_iterator(max_amount_requests):
for i in range(1000):
await asyncio.sleep(0.001)
counter.count += 1
if counter.count > max_amount_requests.count:
max_amount_requests.count = counter.count
yield i
consume_task = asyncio.create_task(consume_requests())
async for _ in AsyncRequestsIterator(
req_iterator(max_amount_requests), counter, 10
):
pass
consume_task.cancel()
assert max_amount_requests.count == 10
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.data_elements import DetDataSample
from mmdet.utils import register_all_modules
# TODO: Waiting to fix multiple call error bug
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(
config_file, device=device, cfg_options=cfg_options)
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device=device)
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
rng = np.random.RandomState(0)
img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(config_file, device=device)
result = inference_detector(model, img1)
assert isinstance(result, DetDataSample)
result = inference_detector(model, [img1, img2])
assert isinstance(result, list) and len(result) == 2
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.core import DetDataSample
from mmdet.utils import register_all_modules
# TODO: Waiting to fix multiple call error bug
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(
config_file, device=device, cfg_options=cfg_options)
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device=device)
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
rng = np.random.RandomState(0)
img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(config_file, device=device)
result = inference_detector(model, img1)
assert isinstance(result, DetDataSample)
result = inference_detector(model, [img1, img2])
assert isinstance(result, list) and len(result) == 2
|
import asyncio
import json
import os
import time
import pytest
from jina import Client, Document
from jina.enums import PodRoleType, PollingType
from jina.helper import random_port
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.servers import BaseServer
from tests.helper import _generate_pod_args
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def head_runtime_docker_image_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'head-runtime/'), tag='head-runtime')
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.fixture(scope='module')
def worker_runtime_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'worker-runtime/'), tag='worker-runtime'
)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.asyncio
# test gateway, head and worker pod by creating them manually in the simplest configuration
async def test_pods_trivial_topology(
head_runtime_docker_image_built, worker_runtime_docker_image_built
):
worker_port = random_port()
head_port = random_port()
port = random_port()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'
# create a single worker pod
worker_pod = _create_worker_pod(worker_port)
    # this would be done by the Pod; it adds the worker to the head
worker_host, worker_port = worker_pod.runtime_ctrl_address.split(':')
connection_list_dict = {'0': [f'{worker_host}:{worker_port}']}
# create a single head pod
head_pod = _create_head_pod(head_port, connection_list_dict)
# create a single gateway pod
gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)
with gateway_pod, head_pod, worker_pod:
await asyncio.sleep(1.0)
print(f' runtime_ctrls_address {head_pod.runtime_ctrl_address}')
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=head_pod.runtime_ctrl_address,
ready_or_shutdown_event=head_pod.ready_or_shutdown.event,
)
print(f' runtime_ctrls_address {worker_pod.runtime_ctrl_address}')
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=worker_pod.runtime_ctrl_address,
ready_or_shutdown_event=worker_pod.ready_or_shutdown.event,
)
head_pod.ready_or_shutdown.event.wait(timeout=5.0)
worker_pod.ready_or_shutdown.event.wait(timeout=5.0)
gateway_pod.ready_or_shutdown.event.wait(timeout=5.0)
# send requests to the gateway
c = Client(host='localhost', port=port, asyncio=True)
responses = c.post(
'/', inputs=async_inputs, request_size=1, return_responses=True
)
response_list = []
async for response in responses:
response_list.append(response)
assert len(response_list) == 20
assert len(response_list[0].docs) == 1
def _create_worker_pod(port):
args = _generate_pod_args()
args.port = [port]
args.name = 'worker'
args.uses = 'docker://worker-runtime'
return ContainerPod(args)
def _create_head_pod(port, connection_list_dict):
args = _generate_pod_args()
args.port = [port]
args.name = 'head'
args.pod_role = PodRoleType.HEAD
args.polling = PollingType.ANY
args.uses = 'docker://head-runtime'
args.connection_list = json.dumps(connection_list_dict)
return ContainerPod(args)
def _create_gateway_pod(graph_description, pod_addresses, port):
return Pod(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
]
)
)
async def async_inputs():
for _ in range(20):
yield Document(text='client0-Request')
|
import asyncio
import json
import os
import time
import pytest
from jina import Client, Document
from jina.enums import PodRoleType, PollingType
from jina.helper import random_port
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.head import HeadRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod_args
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def head_runtime_docker_image_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'head-runtime/'), tag='head-runtime')
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.fixture(scope='module')
def worker_runtime_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'worker-runtime/'), tag='worker-runtime'
)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.asyncio
# test gateway, head and worker pod by creating them manually in the simplest configuration
async def test_pods_trivial_topology(
head_runtime_docker_image_built, worker_runtime_docker_image_built
):
worker_port = random_port()
head_port = random_port()
port = random_port()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'
# create a single worker pod
worker_pod = _create_worker_pod(worker_port)
    # this would be done by the Pod; it adds the worker to the head
worker_host, worker_port = worker_pod.runtime_ctrl_address.split(':')
connection_list_dict = {'0': [f'{worker_host}:{worker_port}']}
# create a single head pod
head_pod = _create_head_pod(head_port, connection_list_dict)
# create a single gateway pod
gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)
with gateway_pod, head_pod, worker_pod:
await asyncio.sleep(1.0)
assert HeadRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=head_pod.runtime_ctrl_address,
ready_or_shutdown_event=head_pod.ready_or_shutdown.event,
)
assert WorkerRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=worker_pod.runtime_ctrl_address,
ready_or_shutdown_event=worker_pod.ready_or_shutdown.event,
)
head_pod.ready_or_shutdown.event.wait(timeout=5.0)
worker_pod.ready_or_shutdown.event.wait(timeout=5.0)
gateway_pod.ready_or_shutdown.event.wait(timeout=5.0)
# send requests to the gateway
c = Client(host='localhost', port=port, asyncio=True)
responses = c.post(
'/', inputs=async_inputs, request_size=1, return_responses=True
)
response_list = []
async for response in responses:
response_list.append(response)
assert len(response_list) == 20
assert len(response_list[0].docs) == 1
def _create_worker_pod(port):
args = _generate_pod_args()
args.port = port
args.name = 'worker'
args.uses = 'docker://worker-runtime'
return ContainerPod(args)
def _create_head_pod(port, connection_list_dict):
args = _generate_pod_args()
args.port = port
args.name = 'head'
args.pod_role = PodRoleType.HEAD
args.polling = PollingType.ANY
args.uses = 'docker://head-runtime'
args.connection_list = json.dumps(connection_list_dict)
return ContainerPod(args)
def _create_gateway_pod(graph_description, pod_addresses, port):
return Pod(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
]
)
)
async def async_inputs():
for _ in range(20):
yield Document(text='client0-Request')
|
from pathlib import Path
from typing import Optional, List, Tuple
from annlite.storage.table import Table
class OffsetMapping(Table):
def __init__(
self,
name: str = 'offset2ids',
data_path: Optional[Path] = None,
in_memory: bool = True,
):
super().__init__(name, data_path, in_memory=in_memory)
self.create_table()
self._size = None
def close(self):
self._conn.close()
def create_table(self):
sql = f'''CREATE TABLE IF NOT EXISTS {self.name}
(offset INTEGER NOT NULL PRIMARY KEY,
doc_id TEXT NOT NULL)'''
self.execute(sql, commit=True)
def drop(self):
sql = f'''DROP TABLE IF EXISTS {self.name}'''
self.execute(sql, commit=True)
def clear(self):
super().clear()
self._size = None
def __len__(self):
return self.size
@property
def size(self):
if self._size is None:
sql = f'SELECT MAX(offset) from {self.name} LIMIT 1;'
result = self._conn.execute(sql).fetchone()
            self._size = result[0] + 1 if result[0] is not None else 0
return self._size
def extend_doc_ids(self, doc_ids: List[str], commit: bool = True):
offsets = [self.size + i for i in range(len(doc_ids))]
offset_ids = list(zip(offsets, doc_ids))
self._insert(offset_ids, commit=commit)
def _insert(self, offset_ids: List[Tuple[int, str]], commit: bool = True):
sql = f'INSERT INTO {self.name}(offset, doc_id) VALUES (?, ?);'
self.execute_many(sql, offset_ids, commit=commit)
self._size = self.size + len(offset_ids)
def get_id_by_offset(self, offset: int):
offset = len(self) + offset if offset < 0 else offset
sql = f'SELECT doc_id FROM {self.name} WHERE offset = ? LIMIT 1;'
result = self._conn.execute(sql, (offset,)).fetchone()
return str(result[0]) if result is not None else None
def get_ids_by_offsets(self, offsets: List[int]) -> List[str]:
return [self.get_id_by_offset(offset) for offset in offsets]
def get_offsets_by_ids(self, ids: List[str]) -> List[int]:
return [self.get_offset_by_id(k) for k in ids]
def get_offset_by_id(self, doc_id: str):
sql = f'SELECT offset FROM {self.name} WHERE doc_id=? LIMIT 1;'
result = self._conn.execute(sql, (doc_id,)).fetchone()
return result[0] if result else None
def get_all_ids(self):
sql = f'SELECT doc_id FROM {self.name} ORDER BY offset'
result = self._conn.execute(sql).fetchall()
return [r[0] for r in result] if result else []
def del_at_offset(self, offset: int, commit: bool = True):
offset = len(self) + offset if offset < 0 else offset
sql = f'DELETE FROM {self.name} WHERE offset=?'
self._conn.execute(sql, (offset,))
self.shift_offset(offset, shift_step=1, direction='left', commit=commit)
self._size -= 1
def del_at_offsets(self, offsets: List[int], commit: bool = True):
for offset in sorted(offsets, reverse=True):
self.del_at_offset(offset, commit=False)
if commit:
self.commit()
def insert_at_offset(self, offset: int, doc_id: str, commit: bool = True):
offset = len(self) + offset if offset < 0 else offset
self.shift_offset(offset - 1, shift_step=1, direction='right', commit=False)
self._insert([(offset, doc_id)], commit=commit)
def set_at_offset(self, offset: int, doc_id: str, commit: bool = True):
offset = len(self) + offset if offset < 0 else offset
sql = f'UPDATE {self.name} SET doc_id=? WHERE offset = ?'
self._conn.execute(
sql,
(
doc_id,
offset,
),
)
if commit:
self.commit()
def shift_offset(
self,
shift_from: int,
shift_step: int = 1,
direction: str = 'left',
commit: bool = True,
):
if direction == 'left':
sql = f'UPDATE {self.name} SET offset=offset-{shift_step} WHERE offset > ?'
elif direction == 'right':
sql = f'UPDATE {self.name} SET offset=offset+{shift_step} WHERE offset > ?'
else:
            raise ValueError(f'The shift_offset direction {direction} is not supported!')
self._conn.execute(sql, (shift_from,))
if commit:
self._conn.commit()
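# Minimal usage sketch (illustrative addition, not part of the original module); it
# assumes the in-memory defaults of annlite's Table work as constructed above.
if __name__ == '__main__':  # pragma: no cover - example only
    mapping = OffsetMapping(in_memory=True)
    mapping.extend_doc_ids(['doc-a', 'doc-b', 'doc-c'])
    mapping.insert_at_offset(1, 'doc-x')        # shifts 'doc-b' and 'doc-c' to the right
    print(mapping.get_all_ids())                # ['doc-a', 'doc-x', 'doc-b', 'doc-c']
    mapping.del_at_offset(0)                    # removes 'doc-a' and shifts the rest left
    print(mapping.get_offset_by_id('doc-x'))    # 0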
|
from pathlib import Path
from typing import Optional, List, Tuple
from annlite.storage.table import Table
class OffsetMapping(Table):
def __init__(
self,
name: str = 'offset2ids',
data_path: Optional[Path] = None,
in_memory: bool = True,
):
super().__init__(name, data_path, in_memory)
self.create_table()
self._size = None
def close(self):
self._conn.close()
def create_table(self):
sql = f'''CREATE TABLE IF NOT EXISTS {self.name}
(offset INTEGER NOT NULL PRIMARY KEY,
doc_id TEXT NOT NULL)'''
self.execute(sql, commit=True)
def drop(self):
sql = f'''DROP TABLE IF EXISTS {self.name}'''
self.execute(sql, commit=True)
def clear(self):
super().clear()
self._size = None
def __len__(self):
return self.size
@property
def size(self):
if self._size is None:
sql = f'SELECT MAX(offset) from {self.name} LIMIT 1;'
result = self._conn.execute(sql).fetchone()
            self._size = result[0] + 1 if result[0] is not None else 0
return self._size
def extend_doc_ids(self, doc_ids: List[str], commit: bool = True):
offsets = [self.size + i for i in range(len(doc_ids))]
offset_ids = list(zip(offsets, doc_ids))
self._insert(offset_ids, commit=commit)
def _insert(self, offset_ids: List[Tuple[int, str]], commit: bool = True):
sql = f'INSERT INTO {self.name}(offset, doc_id) VALUES (?, ?);'
self.execute_many(sql, offset_ids, commit=commit)
self._size = self.size + len(offset_ids)
def get_id_by_offset(self, offset: int):
offset = len(self) + offset if offset < 0 else offset
sql = f'SELECT doc_id FROM {self.name} WHERE offset = ? LIMIT 1;'
result = self._conn.execute(sql, (offset,)).fetchone()
return str(result[0]) if result is not None else None
def get_ids_by_offsets(self, offsets: List[int]) -> List[str]:
return [self.get_id_by_offset(offset) for offset in offsets]
def get_offsets_by_ids(self, ids: List[str]) -> List[int]:
return [self.get_offset_by_id(k) for k in ids]
def get_offset_by_id(self, doc_id: str):
sql = f'SELECT offset FROM {self.name} WHERE doc_id=? LIMIT 1;'
result = self._conn.execute(sql, (doc_id,)).fetchone()
return result[0] if result else None
def get_all_ids(self):
sql = f'SELECT doc_id FROM {self.name} ORDER BY offset'
result = self._conn.execute(sql).fetchall()
return [r[0] for r in result] if result else []
def del_at_offset(self, offset: int, commit: bool = True):
offset = len(self) + offset if offset < 0 else offset
sql = f'DELETE FROM {self.name} WHERE offset=?'
self._conn.execute(sql, (offset,))
self.shift_offset(offset, shift_step=1, direction='left', commit=commit)
self._size -= 1
def del_at_offsets(self, offsets: List[int], commit: bool = True):
for offset in sorted(offsets, reverse=True):
self.del_at_offset(offset, commit=False)
if commit:
self.commit()
def insert_at_offset(self, offset: int, doc_id: str, commit: bool = True):
offset = len(self) + offset if offset < 0 else offset
self.shift_offset(offset - 1, shift_step=1, direction='right', commit=False)
self._insert([(offset, doc_id)], commit=commit)
def set_at_offset(self, offset: int, doc_id: str, commit: bool = True):
offset = len(self) + offset if offset < 0 else offset
sql = f'UPDATE {self.name} SET doc_id=? WHERE offset = ?'
self._conn.execute(
sql,
(
doc_id,
offset,
),
)
if commit:
self.commit()
def shift_offset(
self,
shift_from: int,
shift_step: int = 1,
direction: str = 'left',
commit: bool = True,
):
if direction == 'left':
sql = f'UPDATE {self.name} SET offset=offset-{shift_step} WHERE offset > ?'
elif direction == 'right':
sql = f'UPDATE {self.name} SET offset=offset+{shift_step} WHERE offset > ?'
else:
            raise ValueError(f'The shift_offset direction {direction} is not supported!')
self._conn.execute(sql, (shift_from,))
if commit:
self._conn.commit()
|
import asyncio
import math
import time
from collections.abc import AsyncIterator
from langchain_core.tracers.memory_stream import _MemoryStream
async def test_same_event_loop() -> None:
"""Test that the memory stream works when the same event loop is used.
This is the easy case.
"""
reader_loop = asyncio.get_event_loop()
channel = _MemoryStream[dict](reader_loop)
writer = channel.get_send_stream()
reader = channel.get_receive_stream()
async def producer() -> None:
"""Produce items with slight delay."""
tic = time.time()
for i in range(3):
await asyncio.sleep(0.10)
toc = time.time()
await writer.send(
{
"item": i,
"produce_time": toc - tic,
}
)
await writer.aclose()
async def consumer() -> AsyncIterator[dict]:
tic = time.time()
async for item in reader:
toc = time.time()
yield {
"receive_time": toc - tic,
**item,
}
producer_task = asyncio.create_task(producer())
items = [item async for item in consumer()]
for item in items:
delta_time = item["receive_time"] - item["produce_time"]
# Allow a generous 10ms of delay
# The test is meant to verify that the producer and consumer are running in
# parallel despite the fact that the producer is running from another thread.
# abs_tol is used to allow for some delay in the producer and consumer
# due to overhead.
# To verify that the producer and consumer are running in parallel, we
# expect the delta_time to be smaller than the sleep delay in the producer
# * # of items = 30 ms
assert math.isclose(delta_time, 0, abs_tol=0.010) is True, (
f"delta_time: {delta_time}"
)
await producer_task
async def test_queue_for_streaming_via_sync_call() -> None:
"""Test via async -> sync -> async path."""
reader_loop = asyncio.get_event_loop()
channel = _MemoryStream[dict](reader_loop)
writer = channel.get_send_stream()
reader = channel.get_receive_stream()
async def producer() -> None:
"""Produce items with slight delay."""
tic = time.time()
for i in range(3):
await asyncio.sleep(0.2)
toc = time.time()
await writer.send(
{
"item": i,
"produce_time": toc - tic,
}
)
await writer.aclose()
def sync_call() -> None:
"""Blocking sync call."""
asyncio.run(producer())
async def consumer() -> AsyncIterator[dict]:
tic = time.time()
async for item in reader:
toc = time.time()
yield {
"receive_time": toc - tic,
**item,
}
task = asyncio.create_task(asyncio.to_thread(sync_call))
items = [item async for item in consumer()]
await task
assert len(items) == 3
for item in items:
delta_time = item["receive_time"] - item["produce_time"]
# Allow a generous 10ms of delay
# The test is meant to verify that the producer and consumer are running in
# parallel despite the fact that the producer is running from another thread.
# abs_tol is used to allow for some delay in the producer and consumer
# due to overhead.
# To verify that the producer and consumer are running in parallel, we
# expect the delta_time to be smaller than the sleep delay in the producer
# * # of items = 30 ms
assert math.isclose(delta_time, 0, abs_tol=0.020) is True, (
f"delta_time: {delta_time}"
)
def test_send_to_closed_stream() -> None:
"""Test that sending to a closed stream doesn't raise an error.
We may want to handle this in a better way in the future.
"""
event_loop = asyncio.get_event_loop()
channel = _MemoryStream[str](event_loop)
writer = channel.get_send_stream()
    # send with an open event loop
writer.send_nowait("hello")
event_loop.close()
writer.send_nowait("hello")
# now close the loop
event_loop.close()
writer.close()
writer.send_nowait("hello")
async def test_closed_stream() -> None:
reader_loop = asyncio.get_event_loop()
channel = _MemoryStream[str](reader_loop)
writer = channel.get_send_stream()
reader = channel.get_receive_stream()
await writer.aclose()
assert [chunk async for chunk in reader] == []
|
import asyncio
import math
import time
from collections.abc import AsyncIterator
from langchain_core.tracers.memory_stream import _MemoryStream
async def test_same_event_loop() -> None:
"""Test that the memory stream works when the same event loop is used.
This is the easy case.
"""
reader_loop = asyncio.get_event_loop()
channel = _MemoryStream[dict](reader_loop)
writer = channel.get_send_stream()
reader = channel.get_receive_stream()
async def producer() -> None:
"""Produce items with slight delay."""
tic = time.time()
for i in range(3):
await asyncio.sleep(0.10)
toc = time.time()
await writer.send(
{
"item": i,
"produce_time": toc - tic,
}
)
await writer.aclose()
async def consumer() -> AsyncIterator[dict]:
tic = time.time()
async for item in reader:
toc = time.time()
yield {
"receive_time": toc - tic,
**item,
}
asyncio.create_task(producer())
items = [item async for item in consumer()]
for item in items:
delta_time = item["receive_time"] - item["produce_time"]
# Allow a generous 10ms of delay
# The test is meant to verify that the producer and consumer are running in
# parallel despite the fact that the producer is running from another thread.
# abs_tol is used to allow for some delay in the producer and consumer
# due to overhead.
# To verify that the producer and consumer are running in parallel, we
# expect the delta_time to be smaller than the sleep delay in the producer
# * # of items = 30 ms
assert math.isclose(delta_time, 0, abs_tol=0.010) is True, (
f"delta_time: {delta_time}"
)
async def test_queue_for_streaming_via_sync_call() -> None:
"""Test via async -> sync -> async path."""
reader_loop = asyncio.get_event_loop()
channel = _MemoryStream[dict](reader_loop)
writer = channel.get_send_stream()
reader = channel.get_receive_stream()
async def producer() -> None:
"""Produce items with slight delay."""
tic = time.time()
for i in range(3):
await asyncio.sleep(0.2)
toc = time.time()
await writer.send(
{
"item": i,
"produce_time": toc - tic,
}
)
await writer.aclose()
def sync_call() -> None:
"""Blocking sync call."""
asyncio.run(producer())
async def consumer() -> AsyncIterator[dict]:
tic = time.time()
async for item in reader:
toc = time.time()
yield {
"receive_time": toc - tic,
**item,
}
task = asyncio.create_task(asyncio.to_thread(sync_call))
items = [item async for item in consumer()]
await task
assert len(items) == 3
for item in items:
delta_time = item["receive_time"] - item["produce_time"]
# Allow a generous 10ms of delay
# The test is meant to verify that the producer and consumer are running in
# parallel despite the fact that the producer is running from another thread.
# abs_tol is used to allow for some delay in the producer and consumer
# due to overhead.
# To verify that the producer and consumer are running in parallel, we
# expect the delta_time to be smaller than the sleep delay in the producer
# * # of items = 30 ms
assert math.isclose(delta_time, 0, abs_tol=0.020) is True, (
f"delta_time: {delta_time}"
)
def test_send_to_closed_stream() -> None:
"""Test that sending to a closed stream doesn't raise an error.
We may want to handle this in a better way in the future.
"""
event_loop = asyncio.get_event_loop()
channel = _MemoryStream[str](event_loop)
writer = channel.get_send_stream()
    # send with an open event loop
writer.send_nowait("hello")
event_loop.close()
writer.send_nowait("hello")
# now close the loop
event_loop.close()
writer.close()
writer.send_nowait("hello")
async def test_closed_stream() -> None:
reader_loop = asyncio.get_event_loop()
channel = _MemoryStream[str](reader_loop)
writer = channel.get_send_stream()
reader = channel.get_receive_stream()
await writer.aclose()
assert [chunk async for chunk in reader] == []
|
import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from ..utils.deprecation_utils import deprecated
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
@deprecated(
"This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
        # LocalFileSystem.mv does copy + rm, so it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from ..utils.deprecation_utils import deprecated
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
@deprecated(
"This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
        # LocalFileSystem.mv does copy + rm, so it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
    This is necessary, otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
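# Illustrative sketch (assumption, not part of the module): _reset_fsspec_lock is
# meant to be called right after a worker process is forked, before any fsspec-based
# streaming resumes, so that no event loop or IO thread from the parent is reused.
def _example_worker_init(worker_id: int) -> None:  # hypothetical DataLoader worker_init_fn
    _reset_fsspec_lock()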
|
"""Tool for the Google Finance"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
class GoogleFinanceQueryRun(BaseTool):
"""Tool that queries the Google Finance API."""
name: str = "google_finance"
description: str = (
"A wrapper around Google Finance Search. "
"Useful for when you need to get information about"
"google search Finance from Google Finance"
"Input should be a search query."
)
api_wrapper: GoogleFinanceAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
"""Tool for the Google Finance"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
class GoogleFinanceQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries the Google Finance API."""
name: str = "google_finance"
description: str = (
"A wrapper around Google Finance Search. "
"Useful for when you need to get information about"
"google search Finance from Google Finance"
"Input should be a search query."
)
api_wrapper: GoogleFinanceAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladeLoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SpladeLoss(
student_model,
main_loss=SparseMarginMSELoss,
lambda_corpus=5e-3,
lambda_query=0.1,
)
trainer = SparseEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladeLoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SpladeLoss(
student_model,
main_loss=SparseMarginMSELoss,
lamda_corpus=5e-3,
lamda_query=0.1,
)
trainer = SparseEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
|
import torch
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very cold.", "She walked to the store."],
"passage3": ["Its rainy", "She took the bus"],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
emb_passages3 = teacher_model.encode(batch["passage3"])
return {
"label": torch.stack(
[
teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2),
teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages3),
],
dim=1,
)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SparseMarginMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SparseMarginMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
help='If specify checkpoint path, resume from it, while if not '
'specify, try to auto resume from the latest checkpoint '
'in the work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
action='store_true',
help='resume from the latest checkpoint in the work_dir automatically')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
if args.resume:
cfg.resume = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING: # pragma: no cover
import numpy as np
from docarray import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given an input query.
:param query: the query documents to search.
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:param filter: filter query used for pre-filtering
:param kwargs: other kwargs.
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
from docarray.math import ndarray
n_rows, _ = ndarray.get_array_rows(query)
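        # a single flat query vector is reshaped into a (1, dim) batch before searching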
if n_rows == 1:
query = query.reshape(1, -1)
_, match_docs = self._annlite.search_by_vectors(
query, limit=limit, filter=filter or {}, include_metadata=not only_id
)
return match_docs
def _filter(
self,
filter: Dict,
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Annlite` filter).
:param filter: the input filter to apply in each stored document
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
docs = self._annlite.filter(
filter=filter,
limit=limit,
include_metadata=not only_id,
)
return DocumentArray(docs)
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING:
import numpy as np
from docarray import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given an input query.
:param query: the query documents to search.
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:param filter: filter query used for pre-filtering
:param kwargs: other kwargs.
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
from docarray.math import ndarray
n_rows, _ = ndarray.get_array_rows(query)
if n_rows == 1:
query = query.reshape(1, -1)
_, match_docs = self._annlite.search_by_vectors(
query, limit=limit, filter=filter or {}, include_metadata=not only_id
)
return match_docs
def _filter(
self,
filter: Dict,
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Annlite` filter).
:param filter: the input filter to apply in each stored document
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
docs = self._annlite.filter(
filter=filter,
limit=limit,
include_metadata=not only_id,
)
return DocumentArray(docs)
|
"""HTML node parser."""
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
if TYPE_CHECKING:
from bs4 import Tag, PageElement, NavigableString
DEFAULT_TAGS = ["p", "h1", "h2", "h3", "h4", "h5", "h6", "li", "b", "i", "u", "section"]
class HTMLNodeParser(NodeParser):
"""HTML node parser.
Splits a document into Nodes using custom HTML splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
tags: List[str] = Field(
default=DEFAULT_TAGS, description="HTML tags to extract text from."
)
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
tags: Optional[List[str]] = DEFAULT_TAGS,
) -> "HTMLNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
tags=tags,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "HTMLNodeParser"
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
try:
from bs4 import BeautifulSoup, Tag
except ImportError:
raise ImportError("bs4 is required to read HTML files.")
text = node.get_content(metadata_mode=MetadataMode.NONE)
soup = BeautifulSoup(text, "html.parser")
html_nodes = []
last_tag = None
current_section = ""
tags = soup.find_all(self.tags)
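        # group consecutive elements that share the same tag name into one section;
        # when the tag changes, the accumulated section is flushed as a node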
for tag in tags:
tag_text = self._extract_text_from_tag(tag)
if isinstance(tag, Tag) and (tag.name == last_tag or last_tag is None):
last_tag = tag.name
current_section += f"{tag_text.strip()}\n"
else:
html_nodes.append(
self._build_node_from_split(
current_section.strip(), node, {"tag": last_tag}
)
)
if isinstance(tag, Tag):
last_tag = tag.name
current_section = f"{tag_text}\n"
if current_section:
html_nodes.append(
self._build_node_from_split(
current_section.strip(), node, {"tag": last_tag}
)
)
return html_nodes
def _extract_text_from_tag(
self, tag: Union["Tag", "NavigableString", "PageElement"]
) -> str:
from bs4 import NavigableString, Tag, PageElement
texts = []
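        # keep direct text children; nested tags that are themselves in self.tags are
        # skipped here because they will be emitted as their own nodes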
if isinstance(tag, Tag):
for elem in tag.children:
if isinstance(elem, NavigableString):
if elem.strip():
texts.append(elem.strip())
elif isinstance(elem, Tag):
if elem.name in self.tags:
continue
else:
texts.append(elem.get_text().strip())
elif isinstance(elem, PageElement):
texts.append(elem.get_text().strip())
else:
texts.append(tag.get_text().strip())
return "\n".join(texts)
def _build_node_from_split(
self,
text_split: str,
node: BaseNode,
metadata: dict,
) -> TextNode:
"""Build node from single text split."""
node = build_nodes_from_splits([text_split], node, id_func=self.id_func)[0]
if self.include_metadata:
node.metadata = {**node.metadata, **metadata}
return node
|
"""HTML node parser."""
from typing import TYPE_CHECKING, Any, List, Optional, Sequence
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
if TYPE_CHECKING:
from bs4 import Tag
DEFAULT_TAGS = ["p", "h1", "h2", "h3", "h4", "h5", "h6", "li", "b", "i", "u", "section"]
class HTMLNodeParser(NodeParser):
"""HTML node parser.
Splits a document into Nodes using custom HTML splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
tags: List[str] = Field(
default=DEFAULT_TAGS, description="HTML tags to extract text from."
)
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
tags: Optional[List[str]] = DEFAULT_TAGS,
) -> "HTMLNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
tags=tags,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "HTMLNodeParser"
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("bs4 is required to read HTML files.")
text = node.get_content(metadata_mode=MetadataMode.NONE)
soup = BeautifulSoup(text, "html.parser")
html_nodes = []
last_tag = None
current_section = ""
tags = soup.find_all(self.tags)
for tag in tags:
tag_text = self._extract_text_from_tag(tag)
if tag.name == last_tag or last_tag is None:
last_tag = tag.name
current_section += f"{tag_text.strip()}\n"
else:
html_nodes.append(
self._build_node_from_split(
current_section.strip(), node, {"tag": last_tag}
)
)
last_tag = tag.name
current_section = f"{tag_text}\n"
if current_section:
html_nodes.append(
self._build_node_from_split(
current_section.strip(), node, {"tag": last_tag}
)
)
return html_nodes
def _extract_text_from_tag(self, tag: "Tag") -> str:
from bs4 import NavigableString
texts = []
for elem in tag.children:
if isinstance(elem, NavigableString):
if elem.strip():
texts.append(elem.strip())
elif elem.name in self.tags:
continue
else:
texts.append(elem.get_text().strip())
return "\n".join(texts)
def _build_node_from_split(
self,
text_split: str,
node: BaseNode,
metadata: dict,
) -> TextNode:
"""Build node from single text split."""
node = build_nodes_from_splits([text_split], node, id_func=self.id_func)[0]
if self.include_metadata:
node.metadata = {**node.metadata, **metadata}
return node
|
# Reference: https://github.com/shenyunhang/APE/blob/main/datasets/tools/objects3652coco/fix_o365_names.py # noqa
import argparse
import copy
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--ann',
default='data/objects365v2/annotations/zhiyuan_objv2_train.json')
parser.add_argument(
'--fix_name_map',
default='tools/dataset_converters/zhiyuan_objv2_train_names_fix.csv')
args = parser.parse_args()
new_names = {}
old_names = {}
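    # each row of the fix-name CSV is: category_id,old_name,new_name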
with open(args.fix_name_map, 'r') as f:
for line in f:
tmp = line.strip().split(',')
old_names[int(tmp[0])] = tmp[1]
new_names[int(tmp[0])] = tmp[2]
data = json.load(open(args.ann, 'r'))
cat_info = copy.deepcopy(data['categories'])
for x in cat_info:
if old_names[x['id']] != new_names[x['id']]:
print('Renaming', x['id'], x['name'], new_names[x['id']])
x['name'] = new_names[x['id']]
data['categories'] = cat_info
out_name = args.ann[:-5] + '_fixname.json'
print('Saving to', out_name)
json.dump(data, open(out_name, 'w'))
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import copy
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--ann',
default='data/objects365v2/annotations/zhiyuan_objv2_train.json')
parser.add_argument(
'--fix_name_map',
default='tools/dataset_converters/zhiyuan_objv2_train_names_fix.csv')
args = parser.parse_args()
new_names = {}
old_names = {}
with open(args.fix_name_map, 'r') as f:
for line in f:
tmp = line.strip().split(',')
old_names[int(tmp[0])] = tmp[1]
new_names[int(tmp[0])] = tmp[2]
data = json.load(open(args.ann, 'r'))
cat_info = copy.deepcopy(data['categories'])
for x in cat_info:
if old_names[x['id']].strip() != x['name'].strip():
print('{} {} {}'.format(x, old_names[x['id']], new_names[x['id']]))
import pdb
pdb.set_trace()
if old_names[x['id']] != new_names[x['id']]:
print('Renaming', x['id'], x['name'], new_names[x['id']])
x['name'] = new_names[x['id']]
data['categories'] = cat_info
out_name = args.ann[:-5] + '_fixname.json'
print('Saving to', out_name)
json.dump(data, open(out_name, 'w'))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
from typing import Any, Callable
from langchain_core.documents import Document
from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType
from langchain.storage import InMemoryStore
from tests.unit_tests.indexes.test_indexing import InMemoryVectorStore
class InMemoryVectorstoreWithSearch(InMemoryVectorStore):
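    # Minimal stub vector store for the tests below: a "search" is a direct key lookup in
    # the underlying store, and scored results always come back with a fixed score of 0.8.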
@staticmethod
def _identity_fn(score: float) -> float:
return score
def _select_relevance_score_fn(self) -> Callable[[float], float]:
return self._identity_fn
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> list[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> list[tuple[Document, float]]:
res = self.store.get(query)
if res is None:
return []
return [(res, 0.8)]
def test_multi_vector_retriever_initialization() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
retriever.vectorstore.add_documents(documents, ids=["1"])
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
async def test_multi_vector_retriever_initialization_async() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
await retriever.vectorstore.aadd_documents(documents, ids=["1"])
await retriever.docstore.amset(list(zip(["1"], documents)))
results = await retriever.ainvoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
def test_multi_vector_retriever_similarity_search_with_score() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents(documents, ids=["1"])
# score_threshold = 0.5
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# score_threshold = 0.9
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 0
async def test_multi_vector_retriever_similarity_search_with_score_async() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
await vectorstore.aadd_documents(documents, ids=["1"])
# score_threshold = 0.5
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# score_threshold = 0.9
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 0
|
from typing import Any, Callable
from langchain_core.documents import Document
from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType
from langchain.storage import InMemoryStore
from tests.unit_tests.indexes.test_indexing import InMemoryVectorStore
class InMemoryVectorstoreWithSearch(InMemoryVectorStore):
@staticmethod
def _identity_fn(score: float) -> float:
return score
def _select_relevance_score_fn(self) -> Callable[[float], float]:
return self._identity_fn
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> list[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> list[tuple[Document, float]]:
res = self.store.get(query)
if res is None:
return []
return [(res, 0.8)]
def test_multi_vector_retriever_initialization() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
retriever.vectorstore.add_documents(documents, ids=["1"])
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
async def test_multi_vector_retriever_initialization_async() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore, docstore=InMemoryStore(), doc_id="doc_id"
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
await retriever.vectorstore.aadd_documents(documents, ids=["1"])
await retriever.docstore.amset(list(zip(["1"], documents)))
results = await retriever.ainvoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
def test_multi_vector_retriever_similarity_search_with_score() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents(documents, ids=["1"])
# score_threshold = 0.5
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# score_threshold = 0.9
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 0
async def test_multi_vector_retriever_similarity_search_with_score_async() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
await vectorstore.aadd_documents(documents, ids=["1"])
# score_threshold = 0.5
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# score_threshold = 0.9
retriever = MultiVectorRetriever( # type: ignore[call-arg]
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents)))
results = retriever.invoke("1")
assert len(results) == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'ATSSVLFusionHead', 'DABDETRHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'DABDETRHead'
]
|
# coding: utf-8
"""Compatibility library."""
"""pandas"""
try:
from pandas import DataFrame as pd_DataFrame
from pandas import Series as pd_Series
from pandas import concat
from pandas.api.types import is_sparse as is_dtype_sparse
PANDAS_INSTALLED = True
except ImportError:
PANDAS_INSTALLED = False
class pd_Series: # type: ignore
"""Dummy class for pandas.Series."""
def __init__(self, *args, **kwargs):
pass
class pd_DataFrame: # type: ignore
"""Dummy class for pandas.DataFrame."""
def __init__(self, *args, **kwargs):
pass
concat = None
is_dtype_sparse = None
"""matplotlib"""
try:
import matplotlib
MATPLOTLIB_INSTALLED = True
except ImportError:
MATPLOTLIB_INSTALLED = False
"""graphviz"""
try:
import graphviz
GRAPHVIZ_INSTALLED = True
except ImportError:
GRAPHVIZ_INSTALLED = False
"""datatable"""
try:
import datatable
if hasattr(datatable, "Frame"):
dt_DataTable = datatable.Frame
else:
dt_DataTable = datatable.DataTable
DATATABLE_INSTALLED = True
except ImportError:
DATATABLE_INSTALLED = False
class dt_DataTable: # type: ignore
"""Dummy class for datatable.DataTable."""
def __init__(self, *args, **kwargs):
pass
"""sklearn"""
try:
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import assert_all_finite, check_array, check_X_y
try:
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import GroupKFold, StratifiedKFold
except ImportError:
from sklearn.cross_validation import GroupKFold, StratifiedKFold
from sklearn.utils.validation import NotFittedError
try:
from sklearn.utils.validation import _check_sample_weight
except ImportError:
from sklearn.utils.validation import check_consistent_length
# dummy function to support older version of scikit-learn
def _check_sample_weight(sample_weight, X, dtype=None):
check_consistent_length(sample_weight, X)
return sample_weight
SKLEARN_INSTALLED = True
_LGBMModelBase = BaseEstimator
_LGBMRegressorBase = RegressorMixin
_LGBMClassifierBase = ClassifierMixin
_LGBMLabelEncoder = LabelEncoder
LGBMNotFittedError = NotFittedError
_LGBMStratifiedKFold = StratifiedKFold
_LGBMGroupKFold = GroupKFold
_LGBMCheckXY = check_X_y
_LGBMCheckArray = check_array
_LGBMCheckSampleWeight = _check_sample_weight
_LGBMAssertAllFinite = assert_all_finite
_LGBMCheckClassificationTargets = check_classification_targets
_LGBMComputeSampleWeight = compute_sample_weight
except ImportError:
SKLEARN_INSTALLED = False
class _LGBMModelBase: # type: ignore
"""Dummy class for sklearn.base.BaseEstimator."""
pass
class _LGBMClassifierBase: # type: ignore
"""Dummy class for sklearn.base.ClassifierMixin."""
pass
class _LGBMRegressorBase: # type: ignore
"""Dummy class for sklearn.base.RegressorMixin."""
pass
_LGBMLabelEncoder = None
LGBMNotFittedError = ValueError
_LGBMStratifiedKFold = None
_LGBMGroupKFold = None
_LGBMCheckXY = None
_LGBMCheckArray = None
_LGBMCheckSampleWeight = None
_LGBMAssertAllFinite = None
_LGBMCheckClassificationTargets = None
_LGBMComputeSampleWeight = None
"""dask"""
try:
from dask import delayed
from dask.array import Array as dask_Array
from dask.array import from_delayed as dask_array_from_delayed
from dask.bag import from_delayed as dask_bag_from_delayed
from dask.dataframe import DataFrame as dask_DataFrame
from dask.dataframe import Series as dask_Series
from dask.distributed import Client, default_client, wait
DASK_INSTALLED = True
except ImportError:
DASK_INSTALLED = False
dask_array_from_delayed = None
dask_bag_from_delayed = None
delayed = None
default_client = None
wait = None
class Client: # type: ignore
"""Dummy class for dask.distributed.Client."""
def __init__(self, *args, **kwargs):
pass
class dask_Array: # type: ignore
"""Dummy class for dask.array.Array."""
def __init__(self, *args, **kwargs):
pass
class dask_DataFrame: # type: ignore
"""Dummy class for dask.dataframe.DataFrame."""
def __init__(self, *args, **kwargs):
pass
class dask_Series: # type: ignore
"""Dummy class for dask.dataframe.Series."""
def __init__(self, *args, **kwargs):
pass
|
# coding: utf-8
"""Compatibility library."""
"""pandas"""
try:
from pandas import DataFrame as pd_DataFrame
from pandas import Series as pd_Series
from pandas import concat
from pandas.api.types import is_sparse as is_dtype_sparse
PANDAS_INSTALLED = True
except ImportError:
PANDAS_INSTALLED = False
class pd_Series: # type: ignore
"""Dummy class for pandas.Series."""
pass
class pd_DataFrame: # type: ignore
"""Dummy class for pandas.DataFrame."""
pass
concat = None
is_dtype_sparse = None
"""matplotlib"""
try:
import matplotlib
MATPLOTLIB_INSTALLED = True
except ImportError:
MATPLOTLIB_INSTALLED = False
"""graphviz"""
try:
import graphviz
GRAPHVIZ_INSTALLED = True
except ImportError:
GRAPHVIZ_INSTALLED = False
"""datatable"""
try:
import datatable
if hasattr(datatable, "Frame"):
dt_DataTable = datatable.Frame
else:
dt_DataTable = datatable.DataTable
DATATABLE_INSTALLED = True
except ImportError:
DATATABLE_INSTALLED = False
class dt_DataTable: # type: ignore
"""Dummy class for datatable.DataTable."""
pass
"""sklearn"""
try:
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import assert_all_finite, check_array, check_X_y
try:
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import GroupKFold, StratifiedKFold
except ImportError:
from sklearn.cross_validation import GroupKFold, StratifiedKFold
from sklearn.utils.validation import NotFittedError
try:
from sklearn.utils.validation import _check_sample_weight
except ImportError:
from sklearn.utils.validation import check_consistent_length
# dummy function to support older version of scikit-learn
def _check_sample_weight(sample_weight, X, dtype=None):
check_consistent_length(sample_weight, X)
return sample_weight
SKLEARN_INSTALLED = True
_LGBMModelBase = BaseEstimator
_LGBMRegressorBase = RegressorMixin
_LGBMClassifierBase = ClassifierMixin
_LGBMLabelEncoder = LabelEncoder
LGBMNotFittedError = NotFittedError
_LGBMStratifiedKFold = StratifiedKFold
_LGBMGroupKFold = GroupKFold
_LGBMCheckXY = check_X_y
_LGBMCheckArray = check_array
_LGBMCheckSampleWeight = _check_sample_weight
_LGBMAssertAllFinite = assert_all_finite
_LGBMCheckClassificationTargets = check_classification_targets
_LGBMComputeSampleWeight = compute_sample_weight
except ImportError:
SKLEARN_INSTALLED = False
class _LGBMModelBase: # type: ignore
"""Dummy class for sklearn.base.BaseEstimator."""
pass
class _LGBMClassifierBase: # type: ignore
"""Dummy class for sklearn.base.ClassifierMixin."""
pass
class _LGBMRegressorBase: # type: ignore
"""Dummy class for sklearn.base.RegressorMixin."""
pass
_LGBMLabelEncoder = None
LGBMNotFittedError = ValueError
_LGBMStratifiedKFold = None
_LGBMGroupKFold = None
_LGBMCheckXY = None
_LGBMCheckArray = None
_LGBMCheckSampleWeight = None
_LGBMAssertAllFinite = None
_LGBMCheckClassificationTargets = None
_LGBMComputeSampleWeight = None
"""dask"""
try:
from dask import delayed
from dask.array import Array as dask_Array
from dask.array import from_delayed as dask_array_from_delayed
from dask.bag import from_delayed as dask_bag_from_delayed
from dask.dataframe import DataFrame as dask_DataFrame
from dask.dataframe import Series as dask_Series
from dask.distributed import Client, default_client, wait
DASK_INSTALLED = True
except ImportError:
DASK_INSTALLED = False
dask_array_from_delayed = None
dask_bag_from_delayed = None
delayed = None
default_client = None
wait = None
class Client: # type: ignore
"""Dummy class for dask.distributed.Client."""
pass
class dask_Array: # type: ignore
"""Dummy class for dask.array.Array."""
pass
class dask_DataFrame: # type: ignore
"""Dummy class for dask.dataframe.DataFrame."""
pass
class dask_Series: # type: ignore
"""Dummy class for dask.dataframe.Series."""
pass
|
_base_ = 'faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py'
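# Switch the backbone to RegNetX-800MF and match the FPN input channels to its stage output widths.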
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_800mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
neck=dict(
type='FPN',
in_channels=[64, 128, 288, 672],
out_channels=256,
num_outs=5))
|
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_800mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
neck=dict(
type='FPN',
in_channels=[64, 128, 288, 672],
out_channels=256,
num_outs=5))
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfNoSoxDecoder,
skipIfNoSoxEncoder,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxDecoder",
"skipIfNoSoxEncoder",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfNoSoxDecoder,
skipIfNoSoxEncoder,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxDecoder",
"skipIfNoSoxEncoder",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import (count_registered_modules, init_default_scope,
traverse_registry_tree)
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg',
'build_scheduler_from_cfg', 'init_default_scope'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg',
'build_scheduler_from_cfg'
]
|
"""
Default query for PandasIndex.
WARNING: This tool provides the LLM access to the `eval` function.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended to be used in a production setting, and would
require heavy sandboxing or virtual machines.
DEPRECATED: Use `PandasQueryEngine` from `llama-index-experimental` instead.
"""
from typing import Any
class PandasQueryEngine:
"""
Pandas query engine.
DEPRECATED: Use `PandasQueryEngine` from `llama-index-experimental` instead.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"PandasQueryEngine has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine import PandasQueryEngine`\n"
"Note that the PandasQueryEngine allows for arbitrary code execution, \n"
"and should be used in a secure environment."
)
# legacy
NLPandasQueryEngine = PandasQueryEngine
GPTNLPandasQueryEngine = PandasQueryEngine
|
"""Default query for PandasIndex.
WARNING: This tool provides the LLM access to the `eval` function.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended to be used in a production setting, and would
require heavy sandboxing or virtual machines.
DEPRECATED: Use `PandasQueryEngine` from `llama-index-experimental` instead.
"""
from typing import Any
class PandasQueryEngine:
"""Pandas query engine.
DEPRECATED: Use `PandasQueryEngine` from `llama-index-experimental` instead.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"PandasQueryEngine has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine import PandasQueryEngine`\n"
"Note that the PandasQueryEngine allows for arbitrary code execution, \n"
"and should be used in a secure environment."
)
# legacy
NLPandasQueryEngine = PandasQueryEngine
GPTNLPandasQueryEngine = PandasQueryEngine
|
# Owner(s): ["oncall: distributed"]
import os
from datetime import timedelta
import torch
import torch.distributed._dist2 as dist2
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests, TestCase
class ProcessGroupTest(TestCase):
def test_context_manager(self):
os.environ["RANK"] = str(0)
os.environ["WORLD_SIZE"] = str(1)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
pg1 = dist2.new_group(
backend="gloo", timeout=timedelta(seconds=60), device="cpu", pg_options=None
)
pg2 = dist2.new_group(
backend="gloo", timeout=timedelta(seconds=60), device="cpu", pg_options=None
)
self.assertIsNone(dist2.current_process_group())
with dist2.process_group(pg1):
self.assertIs(dist2.current_process_group(), pg1)
with dist2.process_group(pg2):
self.assertIs(dist2.current_process_group(), pg2)
self.assertIs(dist2.current_process_group(), pg1)
self.assertIsNone(dist2.current_process_group())
class ProcessGroupGlooTest(MultiProcessTestCase):
lazy_init = False
@property
def world_size(self) -> int:
return 2
def setUp(self):
super().setUp()
self._spawn_processes()
@requires_gloo()
def test_new_group(self):
os.environ["RANK"] = str(self.rank)
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
device = "cpu"
group = dist2.new_group(
backend="gloo",
timeout=timedelta(seconds=60),
device=device,
pg_options=None,
)
t = torch.rand(10, device=device)
group.allreduce(t).wait()
class ProcessGroupNCCLTest(MultiProcessTestCase):
lazy_init = False
@property
def world_size(self) -> int:
return 2
def setUp(self):
super().setUp()
self._spawn_processes()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_new_group(self):
os.environ["RANK"] = str(self.rank)
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
device = torch.device("cuda", self.rank)
from torch.distributed import ProcessGroupNCCL
opts = ProcessGroupNCCL.Options()
group = dist2.new_group(
backend="nccl",
timeout=timedelta(seconds=60),
device=device,
pg_options=opts,
)
t = torch.rand(10, device=device)
group.allreduce(t).wait()
if __name__ == "__main__":
assert not torch.cuda._initialized, (
"test_distributed must not have initialized CUDA context on main process"
)
run_tests()
|
# Owner(s): ["oncall: distributed"]
import os
from datetime import timedelta
import torch
import torch.distributed._dist2 as dist2
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ProcessGroupGlooTest(MultiProcessTestCase):
lazy_init = False
@property
def world_size(self) -> int:
return 2
def setUp(self):
super().setUp()
self._spawn_processes()
@requires_gloo()
def test_new_group(self):
os.environ["RANK"] = str(self.rank)
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
device = "cpu"
group = dist2.new_group(
backend="gloo",
timeout=timedelta(seconds=60),
device=device,
pg_options=None,
)
t = torch.rand(10, device=device)
group.allreduce(t).wait()
class ProcessGroupNCCLTest(MultiProcessTestCase):
lazy_init = False
@property
def world_size(self) -> int:
return 2
def setUp(self):
super().setUp()
self._spawn_processes()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_new_group(self):
os.environ["RANK"] = str(self.rank)
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"
device = torch.device("cuda", self.rank)
from torch.distributed import ProcessGroupNCCL
opts = ProcessGroupNCCL.Options()
group = dist2.new_group(
backend="nccl",
timeout=timedelta(seconds=60),
device=device,
pg_options=opts,
)
t = torch.rand(10, device=device)
group.allreduce(t).wait()
if __name__ == "__main__":
assert not torch.cuda._initialized, (
"test_distributed must not have initialized CUDA context on main process"
)
run_tests()
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina.constants import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When using it under Python, one can additionally use the following values:
        - a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--uses-dynamic-batching',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `dynamic_batching` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
        The customized Python modules that need to be imported before loading the executor.
        Note that the recommended way is to import only a single module: a simple Python file, if your
        executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
        which should be structured as a Python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/concepts/executor/executor-files/>`__
''',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
        The type of array that `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
gp.add_argument(
'--no-reduce',
'--disable-reduce',
action='store_true',
default=False,
help='Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`',
)
mixin_base_runtime_parser(gp)
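# Hedged usage sketch (not part of the original module): how this mixin might be
# wired into a plain ArgumentParser. The executor name 'MyExecutor' and the
# key/value pair below are made-up placeholders.
#
#   from argparse import ArgumentParser
#   parser = ArgumentParser()
#   mixin_worker_runtime_parser(parser)
#   args = parser.parse_args(['--uses', 'MyExecutor', '--uses-with', 'foo: bar'])
#   print(args.uses, args.uses_with)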
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it could be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When used under Python, one can additionally use the following values:
        - a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--uses-dynamic-batching',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `dynamic_batching` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
        The customized Python modules that need to be imported before loading the executor.
        Note that the recommended way is to import only a single module: a simple Python file, if your
        executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
        which should be structured as a Python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/concepts/executor/executor-files/>`__
''',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
        The type of array that `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
gp.add_argument(
'--no-reduce',
'--disable-reduce',
action='store_true',
default=False,
help='Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`',
)
mixin_base_runtime_parser(gp)
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = getattr(T, cls)(mode=mode).to(device=self.device, dtype=self.dtype)
output = convolve(x, y)
ts_output = torch_script(convolve)(x, y)
self.assertEqual(ts_output, output)
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.Speed(1000, 0.9).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = getattr(T, cls)(mode=mode).to(device=self.device, dtype=self.dtype)
output = convolve(x, y)
ts_output = torch_script(convolve)(x, y)
self.assertEqual(ts_output, output)
|
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_graph(sys.getrecursionlimit, can_constant_fold_through=True)
def getrecursionlimit() -> int:
return sys.getrecursionlimit()
@substitute_in_graph(sys.get_int_max_str_digits, can_constant_fold_through=True)
def get_int_max_str_digits() -> int:
return sys.get_int_max_str_digits()
|
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_graph(sys.getrecursionlimit, can_constant_fold_through=True)
def getrecursionlimit() -> int:
return sys.getrecursionlimit()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.wrappers.sklearn_wrapper import (
SKLearnClassifier as SKLearnClassifier,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnRegressor as SKLearnRegressor,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnTransformer as SKLearnTransformer,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.wrappers.sklearn_wrapper import SKLearnClassifier
from keras.src.wrappers.sklearn_wrapper import SKLearnRegressor
from keras.src.wrappers.sklearn_wrapper import SKLearnTransformer
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN'
]
|
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN'
]
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import asyncio
from typing import Any, Dict, List, Optional
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.constants import DEFAULT_EMBED_BATCH_SIZE
from ollama import Client, AsyncClient
class OllamaEmbedding(BaseEmbedding):
"""Class for Ollama embeddings."""
    base_url: str = Field(description="Base URL where the model is hosted by Ollama.")
model_name: str = Field(description="The Ollama model to use.")
embed_batch_size: int = Field(
default=DEFAULT_EMBED_BATCH_SIZE,
description="The batch size for embedding calls.",
gt=0,
le=2048,
)
ollama_additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Ollama API."
)
_client: Client = PrivateAttr()
_async_client: AsyncClient = PrivateAttr()
def __init__(
self,
model_name: str,
base_url: str = "http://localhost:11434",
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
ollama_additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
client_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
super().__init__(
model_name=model_name,
base_url=base_url,
embed_batch_size=embed_batch_size,
ollama_additional_kwargs=ollama_additional_kwargs or {},
callback_manager=callback_manager,
**kwargs,
)
client_kwargs = client_kwargs or {}
self._client = Client(host=self.base_url, **client_kwargs)
self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
@classmethod
def class_name(cls) -> str:
return "OllamaEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self.get_general_text_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self.aget_general_text_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self.get_general_text_embedding(text)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return await self.aget_general_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
embeddings_list: List[List[float]] = []
for text in texts:
embeddings = self.get_general_text_embedding(text)
embeddings_list.append(embeddings)
return embeddings_list
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return await asyncio.gather(
*[self.aget_general_text_embedding(text) for text in texts]
)
def get_general_text_embedding(self, texts: str) -> List[float]:
"""Get Ollama embedding."""
result = self._client.embeddings(
model=self.model_name, prompt=texts, options=self.ollama_additional_kwargs
)
return result["embedding"]
async def aget_general_text_embedding(self, prompt: str) -> List[float]:
"""Asynchronously get Ollama embedding."""
result = await self._async_client.embeddings(
model=self.model_name, prompt=prompt, options=self.ollama_additional_kwargs
)
return result["embedding"]
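if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): assumes an Ollama
    # server is running at the default base_url and that a model named
    # "nomic-embed-text" has already been pulled; both are assumptions.
    embed_model = OllamaEmbedding(model_name="nomic-embed-text")
    vector = embed_model.get_general_text_embedding("hello world")
    print(len(vector))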
|
import asyncio
from typing import Any, Dict, List, Optional
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.constants import DEFAULT_EMBED_BATCH_SIZE
from ollama import Client, AsyncClient
class OllamaEmbedding(BaseEmbedding):
"""Class for Ollama embeddings."""
    base_url: str = Field(description="Base URL where the model is hosted by Ollama.")
model_name: str = Field(description="The Ollama model to use.")
embed_batch_size: int = Field(
default=DEFAULT_EMBED_BATCH_SIZE,
description="The batch size for embedding calls.",
gt=0,
le=2048,
)
ollama_additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Ollama API."
)
_client: Client = PrivateAttr()
_async_client: AsyncClient = PrivateAttr()
def __init__(
self,
model_name: str,
base_url: str = "http://localhost:11434",
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
ollama_additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
super().__init__(
model_name=model_name,
base_url=base_url,
embed_batch_size=embed_batch_size,
ollama_additional_kwargs=ollama_additional_kwargs or {},
callback_manager=callback_manager,
**kwargs,
)
self._client = Client(host=self.base_url)
self._async_client = AsyncClient(host=self.base_url)
@classmethod
def class_name(cls) -> str:
return "OllamaEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self.get_general_text_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self.aget_general_text_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self.get_general_text_embedding(text)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return await self.aget_general_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
embeddings_list: List[List[float]] = []
for text in texts:
embeddings = self.get_general_text_embedding(text)
embeddings_list.append(embeddings)
return embeddings_list
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return await asyncio.gather(
*[self.aget_general_text_embedding(text) for text in texts]
)
def get_general_text_embedding(self, texts: str) -> List[float]:
"""Get Ollama embedding."""
result = self._client.embeddings(
model=self.model_name, prompt=texts, options=self.ollama_additional_kwargs
)
return result["embedding"]
async def aget_general_text_embedding(self, prompt: str) -> List[float]:
"""Asynchronously get Ollama embedding."""
result = await self._async_client.embeddings(
model=self.model_name, prompt=prompt, options=self.ollama_additional_kwargs
)
return result["embedding"]
|
from abc import ABC
import pytest
from docarray import DocumentArray
from docarray.array.storage.memory import GetSetDelMixin, SequenceLikeMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
type_convert = {
'int': b'NUMERIC',
'float': b'NUMERIC',
'double': b'NUMERIC',
'long': b'NUMERIC',
'str': b'TEXT',
'bytes': b'TEXT',
'bool': b'NUMERIC',
}
@pytest.mark.parametrize('distance', ['L2', 'IP', 'COSINE'])
@pytest.mark.parametrize(
'method,initial_cap,ef_construction,block_size',
[
('HNSW', 10, 250, 1000000),
('FLAT', 10, 250, 1000000),
],
)
@pytest.mark.parametrize(
'columns',
[
[('attr1', 'str'), ('attr2', 'bytes')],
[('attr1', 'int'), ('attr2', 'float')],
[('attr1', 'double'), ('attr2', 'long'), ('attr3', 'bool')],
{'attr1': 'str', 'attr2': 'bytes'},
{'attr1': 'int', 'attr2': 'float'},
{'attr1': 'double', 'attr2': 'long', 'attr3': 'bool'},
],
)
@pytest.mark.parametrize(
'redis_config',
[
{'decode_responses': True},
{'decode_responses': False},
{'retry_on_timeout': True},
{'decode_responses': True, 'retry_on_timeout': True},
{},
],
)
def test_init_storage(
distance,
columns,
method,
initial_cap,
ef_construction,
block_size,
redis_config,
start_storage,
):
cfg = RedisConfig(
n_dim=128,
distance=distance,
columns=columns,
method=method,
initial_cap=initial_cap,
ef_construction=ef_construction,
block_size=block_size,
redis_config=redis_config,
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.info()['tcp_port'] == redis_da._config.port
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][0][1]
== b'embedding'
)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][0][5]
== b'VECTOR'
)
def test_init_storage_update_schema(start_storage):
cfg = RedisConfig(n_dim=128, columns={'attr1': 'str'}, index_name="idx")
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr1'
)
cfg = RedisConfig(
n_dim=128, columns={'attr2': 'str'}, index_name="idx", update_schema=False
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr1'
)
cfg = RedisConfig(
n_dim=128, columns={'attr2': 'str'}, index_name="idx", update_schema=True
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr2'
)
|
from abc import ABC
import pytest
from docarray import DocumentArray
from docarray.array.storage.memory import GetSetDelMixin, SequenceLikeMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
type_convert = {
'int': b'NUMERIC',
'float': b'NUMERIC',
'double': b'NUMERIC',
'long': b'NUMERIC',
'str': b'TEXT',
'bytes': b'TEXT',
'bool': b'NUMERIC',
}
@pytest.fixture(scope='function')
def da_redis():
cfg = RedisConfig(n_dim=128, flush=True)
da_redis = DocumentArrayDummy(storage='redis', config=cfg)
return da_redis
@pytest.mark.parametrize('distance', ['L2', 'IP', 'COSINE'])
@pytest.mark.parametrize(
'method,initial_cap,ef_construction,block_size',
[
('HNSW', 10, 250, 1000000),
('FLAT', 10, 250, 1000000),
],
)
@pytest.mark.parametrize(
'columns',
[
[('attr1', 'str'), ('attr2', 'bytes')],
[('attr1', 'int'), ('attr2', 'float')],
[('attr1', 'double'), ('attr2', 'long'), ('attr3', 'bool')],
{'attr1': 'str', 'attr2': 'bytes'},
{'attr1': 'int', 'attr2': 'float'},
{'attr1': 'double', 'attr2': 'long', 'attr3': 'bool'},
],
)
@pytest.mark.parametrize(
'redis_config',
[
{'decode_responses': True},
{'decode_responses': False},
{'retry_on_timeout': True},
{'decode_responses': True, 'retry_on_timeout': True},
{},
],
)
def test_init_storage(
distance,
columns,
method,
initial_cap,
ef_construction,
block_size,
redis_config,
start_storage,
):
cfg = RedisConfig(
n_dim=128,
distance=distance,
flush=True,
columns=columns,
method=method,
initial_cap=initial_cap,
ef_construction=ef_construction,
block_size=block_size,
redis_config=redis_config,
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.info()['tcp_port'] == redis_da._config.port
assert redis_da._client.ft().info()['attributes'][0][1] == b'embedding'
assert redis_da._client.ft().info()['attributes'][0][5] == b'VECTOR'
def test_init_storage_update_schema(start_storage):
cfg = RedisConfig(n_dim=128, columns={'attr1': 'str'}, flush=True)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr1'
cfg = RedisConfig(n_dim=128, columns={'attr2': 'str'}, update_schema=False)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr1'
cfg = RedisConfig(n_dim=128, columns={'attr2': 'str'}, update_schema=True)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr2'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .ssd_head import SSDHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'CenterNetHead', 'YOLOXHead'
]
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero (active) elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero (active) elements in the embeddings.
If specified, only embeddings with more than this number of non-zero (active) elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from abc import ABC, abstractmethod
from docarray.proto import NodeProto
class BaseNode(ABC):
"""
    A DocumentNode is an object that can be nested inside a Document.
    A Document itself is a DocumentNode as well as a prebuilt type
"""
@abstractmethod
def _to_node_protobuf(self) -> NodeProto:
"""Convert itself into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
...
|
from abc import ABC, abstractmethod
from docarray.proto import NodeProto
class BaseNode(ABC):
"""
    A DocumentNode is an object that can be nested inside a Document.
    A Document itself is a DocumentNode as well as a prebuilt type
"""
@abstractmethod
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert itself into a nested item protobuf message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
...
|
__version__ = '0.13.33'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.32'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .ddq_detr import DDQDETR
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .glip import GLIP
from .grid_rcnn import GridRCNN
from .grounding_dino import GroundingDINO
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR', 'GLIP',
'DDQDETR', 'GroundingDINO'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .glip import GLIP
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR', 'GLIP'
]
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* [:footcite:`cosentino2020librimix`] dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
sample_rate (int, optional): sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any of the audio has a different sample
rate, raises ``ValueError``. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
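if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): assumes a manually
    # generated Libri2Mix tree under /data; the path is a placeholder.
    dataset = LibriMix("/data", subset="dev", num_speakers=2, sample_rate=8000)
    sample_rate, mixture, sources = dataset[0]
    print(sample_rate, mixture.shape, [s.shape for s in sources])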
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* [:footcite:`cosentino2020librimix`] dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
        subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
sample_rate (int, optional): sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any of the audio has a different sample
rate, raises ``ValueError``. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@DETECTORS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_backbone,
teacher_neck,
teacher_bbox_head,
teacher_ckpt,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KnowledgeDistillationSingleStageDetector,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = build_backbone(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = build_neck(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = build_head(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self):
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, img):
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(img)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item is the ground-truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(img)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, label_assignment_results,
img_metas, gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
|
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@DETECTORS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_backbone,
teacher_neck,
teacher_bbox_head,
teacher_ckpt,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KnowledgeDistillationSingleStageDetector,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = build_backbone(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = build_neck(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = build_head(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self):
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, img):
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(img)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item is the ground-truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(img)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, label_assignment_results,
img_metas, gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
|
import os
import subprocess
import sys
directory = os.path.dirname(os.path.realpath(__file__))
BACKEND_DIR = "."
LIBS_DIR = "../autogpt_libs"
TARGET_DIRS = [BACKEND_DIR, LIBS_DIR]
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
try:
subprocess.run(
["poetry", "run"] + list(command),
cwd=directory,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
    except subprocess.CalledProcessError as e:
        print(e.output.decode("utf-8"), file=sys.stderr)
        # re-raise so callers such as lint() can react to the failure
        raise
def lint():
try:
run("ruff", "check", *TARGET_DIRS, "--exit-zero")
run("ruff", "format", "--diff", "--check", LIBS_DIR)
run("isort", "--diff", "--check", "--profile", "black", BACKEND_DIR)
run("black", "--diff", "--check", BACKEND_DIR)
run("pyright", *TARGET_DIRS)
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def format():
run("ruff", "check", "--fix", *TARGET_DIRS)
run("ruff", "format", LIBS_DIR)
run("isort", "--profile", "black", BACKEND_DIR)
run("black", BACKEND_DIR)
run("pyright", *TARGET_DIRS)
|
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
BACKEND_DIR = "."
LIBS_DIR = "../autogpt_libs"
TARGET_DIRS = [BACKEND_DIR, LIBS_DIR]
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", *TARGET_DIRS, "--exit-zero")
run("ruff", "format", "--diff", "--check", LIBS_DIR)
run("isort", "--diff", "--check", "--profile", "black", BACKEND_DIR)
run("black", "--diff", "--check", BACKEND_DIR)
run("pyright", *TARGET_DIRS)
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def format():
run("ruff", "check", "--fix", *TARGET_DIRS)
run("ruff", "format", LIBS_DIR)
run("isort", "--profile", "black", BACKEND_DIR)
run("black", BACKEND_DIR)
run("pyright", *TARGET_DIRS)
|
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
        Return the function schema mapped from the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
return None
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
        Return the tool metadata mapped from the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
schema = self.get_fn_schema_from_fn_name(fn_name, spec_functions=spec_functions)
if schema is None:
return None
func = getattr(self, fn_name)
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
if func_sync is None:
if func_async is not None:
func_sync = patch_sync(func_async)
else:
raise ValueError(
f"Could not retrieve a function for spec: {func_spec}"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
def patch_sync(func_async: AsyncCallable) -> Callable:
"""Patch sync function from async function."""
def patched_sync(*args: Any, **kwargs: Any) -> Any:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(func_async(*args, **kwargs))
return patched_sync
|
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.tools.utils import create_schema_from_function
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""
Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
spec_functions = spec_functions or self.spec_functions
for fn in spec_functions:
if fn == fn_name:
return create_schema_from_function(fn_name, getattr(self, fn_name))
raise ValueError(f"Invalid function name: {fn_name}")
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""
Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
try:
func = getattr(self, fn_name)
except AttributeError:
return None
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
if func_sync is None:
if func_async is not None:
func_sync = patch_sync(func_async)
else:
raise ValueError(
f"Could not retrieve a function for spec: {func_spec}"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
def patch_sync(func_async: AsyncCallable) -> Callable:
"""Patch sync function from async function."""
def patched_sync(*args: Any, **kwargs: Any) -> Any:
loop = asyncio.get_event_loop()
return loop.run_until_complete(func_async(*args, **kwargs))
return patched_sync
|
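As a quick illustration of the API in the pair above: a tool spec subclass lists its method names in spec_functions and converts them with to_tool_list(). This is a minimal sketch, assuming llama-index-core is installed; the MathToolSpec class, its add method, and its docstring are invented for the example and are not part of the library.

from llama_index.core.tools.tool_spec.base import BaseToolSpec

class MathToolSpec(BaseToolSpec):
    # methods named here are exposed as tools
    spec_functions = ["add"]

    def add(self, a: int, b: int) -> int:
        """Add two integers and return the result."""
        return a + b

tools = MathToolSpec().to_tool_list()
print(tools[0].metadata.name)   # "add"
print(tools[0].call(a=1, b=2))  # tool output wrapping 3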
_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
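The config pair above only renames the _base_ file being inherited; the model dict itself is unchanged. As a hedged sketch of how such a file is consumed, assuming an MMDetection 3.x setup where configs are loaded through mmengine, that the hypothetical filename below matches the file on disk, and that the referenced _base_ config sits next to it:

from mmengine.config import Config

# fromfile merges this config with its _base_ chain before returning it
cfg = Config.fromfile("cascade-mask-rcnn_x101-64x4d_fpn_ms-3x_coco.py")
print(cfg.model.backbone.type)    # 'ResNeXt'
print(cfg.model.backbone.groups)  # 64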
import pytest
import torch
from torchvision.prototype import datapoints
def test_isinstance():
assert isinstance(
datapoints.Label([0, 1, 0], categories=["foo", "bar"]),
torch.Tensor,
)
def test_wrapping_no_copy():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
assert label.data_ptr() == tensor.data_ptr()
def test_to_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
label_to = label.to(torch.int32)
assert type(label_to) is datapoints.Label
assert label_to.dtype is torch.int32
assert label_to.categories is label.categories
def test_to_datapoint_reference():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"]).to(torch.int32)
tensor_to = tensor.to(label)
assert type(tensor_to) is torch.Tensor
assert tensor_to.dtype is torch.int32
def test_clone_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
label_clone = label.clone()
assert type(label_clone) is datapoints.Label
assert label_clone.data_ptr() != label.data_ptr()
assert label_clone.categories is label.categories
def test_requires_grad__wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.float32)
label = datapoints.Label(tensor, categories=["foo", "bar"])
assert not label.requires_grad
label_requires_grad = label.requires_grad_(True)
assert type(label_requires_grad) is datapoints.Label
assert label.requires_grad
assert label_requires_grad.requires_grad
def test_other_op_no_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
# any operation besides .to() and .clone() will do here
output = label * 2
assert type(output) is torch.Tensor
@pytest.mark.parametrize(
"op",
[
lambda t: t.numpy(),
lambda t: t.tolist(),
lambda t: t.max(dim=-1),
],
)
def test_no_tensor_output_op_no_wrapping(op):
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
output = op(label)
assert type(output) is not datapoints.Label
def test_inplace_op_no_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
output = label.add_(0)
assert type(output) is torch.Tensor
assert type(label) is datapoints.Label
def test_wrap_like():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
# any operation besides .to() and .clone() will do here
output = label * 2
label_new = datapoints.Label.wrap_like(label, output)
assert type(label_new) is datapoints.Label
assert label_new.data_ptr() == output.data_ptr()
assert label_new.categories is label.categories
|
import pytest
import torch
from torchvision.prototype import datapoints
def test_isinstance():
assert isinstance(
datapoints.Label([0, 1, 0], categories=["foo", "bar"]),
torch.Tensor,
)
def test_wrapping_no_copy():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
assert label.data_ptr() == tensor.data_ptr()
def test_to_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
label_to = label.to(torch.int32)
assert type(label_to) is datapoints.Label
assert label_to.dtype is torch.int32
assert label_to.categories is label.categories
def test_to_feature_reference():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"]).to(torch.int32)
tensor_to = tensor.to(label)
assert type(tensor_to) is torch.Tensor
assert tensor_to.dtype is torch.int32
def test_clone_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
label_clone = label.clone()
assert type(label_clone) is datapoints.Label
assert label_clone.data_ptr() != label.data_ptr()
assert label_clone.categories is label.categories
def test_requires_grad__wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.float32)
label = datapoints.Label(tensor, categories=["foo", "bar"])
assert not label.requires_grad
label_requires_grad = label.requires_grad_(True)
assert type(label_requires_grad) is datapoints.Label
assert label.requires_grad
assert label_requires_grad.requires_grad
def test_other_op_no_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
# any operation besides .to() and .clone() will do here
output = label * 2
assert type(output) is torch.Tensor
@pytest.mark.parametrize(
"op",
[
lambda t: t.numpy(),
lambda t: t.tolist(),
lambda t: t.max(dim=-1),
],
)
def test_no_tensor_output_op_no_wrapping(op):
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
output = op(label)
assert type(output) is not datapoints.Label
def test_inplace_op_no_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
output = label.add_(0)
assert type(output) is torch.Tensor
assert type(label) is datapoints.Label
def test_wrap_like():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
# any operation besides .to() and .clone() will do here
output = label * 2
label_new = datapoints.Label.wrap_like(label, output)
assert type(label_new) is datapoints.Label
assert label_new.data_ptr() == output.data_ptr()
assert label_new.categories is label.categories
|
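The tests above exercise when the Label wrapper type survives an operation and when it is dropped. The core trick, wrapping an existing tensor without copying its storage, can be sketched in plain PyTorch; the MyLabel class and its wrap helper below are illustrative only, not the torchvision implementation, and full metadata propagation (the wrap_like behaviour) needs extra machinery on top.

import torch

class MyLabel(torch.Tensor):
    @classmethod
    def wrap(cls, tensor: torch.Tensor, categories: list) -> "MyLabel":
        # as_subclass reinterprets the tensor in place: same storage, new Python type
        label = tensor.as_subclass(cls)
        label.categories = categories
        return label

t = torch.tensor([0, 1, 0], dtype=torch.int64)
lbl = MyLabel.wrap(t, ["foo", "bar"])
assert isinstance(lbl, torch.Tensor)
assert lbl.data_ptr() == t.data_ptr()  # wrapping did not copy the data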
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import MaskedConv2d
from torch import Tensor
from mmdet.core import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
@MODELS.register_module()
class GARetinaHead(GuidedAnchorHead):
"""Guided-Anchor-based RetinaNet head."""
def __init__(self,
num_classes: int,
in_channels: int,
stacked_convs: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
if init_cfg is None:
init_cfg = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=[
dict(
type='Normal',
name='conv_loc',
std=0.01,
bias_prob=0.01),
dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)
])
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
num_anchors = self.square_anchor_generator.num_base_priors[0]
self.conv_shape = nn.Conv2d(self.feat_channels, num_anchors * 2, 1)
self.feature_adaption_cls = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.feature_adaption_reg = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.retina_cls = MaskedConv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = MaskedConv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x: Tensor) -> Tuple[Tensor]:
"""Forward feature map of a single scale level."""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
loc_pred = self.conv_loc(cls_feat)
shape_pred = self.conv_shape(reg_feat)
cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
if not self.training:
mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
else:
mask = None
cls_score = self.retina_cls(cls_feat, mask)
bbox_pred = self.retina_reg(reg_feat, mask)
return cls_score, bbox_pred, shape_pred, loc_pred
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import MaskedConv2d
from mmdet.registry import MODELS
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
@MODELS.register_module()
class GARetinaHead(GuidedAnchorHead):
"""Guided-Anchor-based RetinaNet head."""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
if init_cfg is None:
init_cfg = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=[
dict(
type='Normal',
name='conv_loc',
std=0.01,
bias_prob=0.01),
dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)
])
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(GARetinaHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
1)
self.feature_adaption_cls = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.feature_adaption_reg = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.retina_cls = MaskedConv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = MaskedConv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature map of a single scale level."""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
loc_pred = self.conv_loc(cls_feat)
shape_pred = self.conv_shape(reg_feat)
cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
if not self.training:
mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
else:
mask = None
cls_score = self.retina_cls(cls_feat, mask)
bbox_pred = self.retina_reg(reg_feat, mask)
return cls_score, bbox_pred, shape_pred, loc_pred
|
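For context on how forward_single in the pair above is used: anchor heads in MMDetection run the per-level forward over every FPN feature map and regroup the outputs by kind. The sketch below is a simplified, illustrative fan-out; the head and feats arguments are placeholders, and the real dispatch lives in the inherited GuidedAnchorHead.forward.

from typing import List, Sequence, Tuple
from torch import Tensor

def forward_all_levels(head, feats: Sequence[Tensor]) -> Tuple[List[Tensor], ...]:
    # run the single-level forward on each pyramid level ...
    outs = [head.forward_single(x) for x in feats]
    # ... then regroup the per-level 4-tuples into four per-kind lists
    cls_scores, bbox_preds, shape_preds, loc_preds = map(list, zip(*outs))
    return cls_scores, bbox_preds, shape_preds, loc_preds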