input | output
---|---
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of PAA. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of PAA. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of PAA. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of PAA. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
from abc import ABC
from typing import Any, Optional, Tuple, Type, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.torch_tensor import TorchTensor
T = TypeVar('T', bound='Embedding')
class EmbeddingMixin(AbstractTensor, ABC):
alternative_type: Optional[Type] = None
@classmethod
def __validate_getitem__(cls, item: Any) -> Tuple[int]:
shape = super().__validate_getitem__(item)
if len(shape) > 1:
error_msg = f'`{cls}` can only have a single dimension/axis.'
if cls.alternative_type:
error_msg += f' Consider using {cls.alternative_type} instead.'
raise ValueError(error_msg)
return shape
class NdArrayEmbedding(NdArray, EmbeddingMixin):
alternative_type = NdArray
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # type: Any
class metaTorchAndEmbedding(torch_base, embedding_base):
pass
class TorchEmbedding(TorchTensor, EmbeddingMixin, metaclass=metaTorchAndEmbedding):
alternative_type = TorchTensor
Embedding = Union[NdArrayEmbedding, TorchEmbedding]
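# --- Illustrative sketch (not part of the source) ---------------------------
# Assuming the usual docarray shape-parametrization syntax (NdArrayEmbedding[dim]),
# __validate_getitem__ accepts a single axis and rejects anything higher-dimensional,
# pointing the user to alternative_type instead. The dimensions below are arbitrary.
EmbeddingType = NdArrayEmbedding[128]  # fine: a single dimension
try:
    NdArrayEmbedding[128, 3]  # more than one axis triggers the ValueError above
except ValueError as err:
    print(err)  # "... can only have a single dimension/axis. Consider using NdArray instead."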
|
from typing import TypeVar
from docarray.proto import NodeProto
from docarray.typing.tensor import NdArray
T = TypeVar('T', bound='Embedding')
class Embedding(NdArray):
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to be
converted into a protobuf
:param field: field in which to store the content in the node proto
:return: the nested item protobuf message
"""
return super()._to_node_protobuf(field='embedding')
|
import PIL.Image
import pytest
import torch
import torchvision.prototype.transforms.utils
from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision.prototype import datapoints
from torchvision.prototype.transforms.functional import to_image_pil
from torchvision.prototype.transforms.utils import has_all, has_any
IMAGE = make_image(color_space="RGB")
BOUNDING_BOX = make_bounding_box(format=datapoints.BoundingBoxFormat.XYXY, spatial_size=IMAGE.spatial_size)
MASK = make_detection_mask(size=IMAGE.spatial_size)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor),
True,
),
(
(to_image_pil(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBox, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
import PIL.Image
import pytest
import torch
import torchvision.prototype.transforms.utils
from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision.prototype import datapoints
from torchvision.prototype.transforms.functional import to_image_pil
from torchvision.prototype.transforms.utils import has_all, has_any
IMAGE = make_image(color_space=datapoints.ColorSpace.RGB)
BOUNDING_BOX = make_bounding_box(format=datapoints.BoundingBoxFormat.XYXY, spatial_size=IMAGE.spatial_size)
MASK = make_detection_mask(size=IMAGE.spatial_size)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor),
True,
),
(
(to_image_pil(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.prototype.transforms.utils.is_simple_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBox, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.tools.utils import create_schema_from_function
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""
Return the schema for the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
spec_functions = spec_functions or self.spec_functions
for fn in spec_functions:
if fn == fn_name:
return create_schema_from_function(fn_name, getattr(self, fn_name))
raise ValueError(f"Invalid function name: {fn_name}")
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""
Return the tool metadata for the given function name.
Return type is Optional, meaning that the metadata can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
try:
func = getattr(self, fn_name)
except AttributeError:
return None
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
if func_sync is None:
if func_async is not None:
func_sync = patch_sync(func_async)
else:
raise ValueError(
f"Could not retrieve a function for spec: {func_spec}"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
def patch_sync(func_async: AsyncCallable) -> Callable:
"""Patch sync function from async function."""
def patched_sync(*args: Any, **kwargs: Any) -> Any:
loop = asyncio.get_event_loop()
return loop.run_until_complete(func_async(*args, **kwargs))
return patched_sync
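# --- Usage sketch (an assumption, not taken from this file) ------------------
# A subclass only lists its methods in spec_functions; to_tool_list() then wraps
# each one in a FunctionTool, deriving metadata and schema from the signature and
# docstring. The AddToolSpec name and its method are made up for illustration.
class AddToolSpec(BaseToolSpec):
    """Hypothetical spec exposing a single arithmetic function."""

    spec_functions = ["add"]

    def add(self, a: int, b: int) -> int:
        """Add two integers and return the result."""
        return a + b

tools = AddToolSpec().to_tool_list()
print(tools[0].metadata.name)  # -> "add"
result = tools[0](a=2, b=3)  # returns a ToolOutput whose content is "5"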
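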
|
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.tools.utils import create_schema_from_function
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
spec_functions = spec_functions or self.spec_functions
for fn in spec_functions:
if fn == fn_name:
return create_schema_from_function(fn_name, getattr(self, fn_name))
raise ValueError(f"Invalid function name: {fn_name}")
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
try:
func = getattr(self, fn_name)
except AttributeError:
return None
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
if func_sync is None:
if func_async is not None:
func_sync = patch_sync(func_async)
else:
raise ValueError(
f"Could not retrieve a function for spec: {func_spec}"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
def patch_sync(func_async: AsyncCallable) -> Callable:
"""Patch sync function from async function."""
def patched_sync(*args: Any, **kwargs: Any) -> Any:
loop = asyncio.get_event_loop()
return loop.run_until_complete(func_async(*args, **kwargs))
return patched_sync
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.decomposition import PCA
from sklearn.metrics import euclidean_distances
# Generate the data
EPSILON = np.finfo(np.float32).eps
n_samples = 20
rng = np.random.RandomState(seed=3)
X_true = rng.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
# Compute pairwise Euclidean distances
distances = euclidean_distances(X_true)
# Add noise to the distances
noise = rng.rand(n_samples, n_samples)
noise = noise + noise.T
np.fill_diagonal(noise, 0)
distances += noise
mds = manifold.MDS(
n_components=2,
max_iter=3000,
eps=1e-9,
random_state=42,
dissimilarity="precomputed",
n_jobs=1,
)
X_mds = mds.fit(distances).embedding_
nmds = manifold.MDS(
n_components=2,
metric=False,
max_iter=3000,
eps=1e-12,
dissimilarity="precomputed",
random_state=42,
n_jobs=1,
n_init=1,
)
X_nmds = nmds.fit_transform(distances)
# Rescale the data
X_mds *= np.sqrt((X_true**2).sum()) / np.sqrt((X_mds**2).sum())
X_nmds *= np.sqrt((X_true**2).sum()) / np.sqrt((X_nmds**2).sum())
# Rotate the data
pca = PCA(n_components=2)
X_true = pca.fit_transform(X_true)
X_mds = pca.fit_transform(X_mds)
X_nmds = pca.fit_transform(X_nmds)
# Align the sign of PCs
for i in [0, 1]:
if np.corrcoef(X_mds[:, i], X_true[:, i])[0, 1] < 0:
X_mds[:, i] *= -1
if np.corrcoef(X_nmds[:, i], X_true[:, i])[0, 1] < 0:
X_nmds[:, i] *= -1
fig = plt.figure(1)
ax = plt.axes([0.0, 0.0, 1.0, 1.0])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color="navy", s=s, lw=0, label="True Position")
plt.scatter(X_mds[:, 0], X_mds[:, 1], color="turquoise", s=s, lw=0, label="MDS")
plt.scatter(X_nmds[:, 0], X_nmds[:, 1], color="darkorange", s=s, lw=0, label="NMDS")
plt.legend(scatterpoints=1, loc="best", shadow=False)
# Plot the edges
start_idx, end_idx = np.where(X_mds)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [
[X_true[i, :], X_true[j, :]] for i in range(len(X_true)) for j in range(len(X_true))
]
edges = distances.max() / (distances + EPSILON) * 100
np.fill_diagonal(edges, 0)
edges = np.abs(edges)
lc = LineCollection(
segments, zorder=0, cmap=plt.cm.Blues, norm=plt.Normalize(0, edges.max())
)
lc.set_array(edges.flatten())
lc.set_linewidths(np.full(len(segments), 0.5))
ax.add_collection(lc)
plt.show()
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.decomposition import PCA
from sklearn.metrics import euclidean_distances
EPSILON = np.finfo(np.float32).eps
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(
n_components=2,
max_iter=3000,
eps=1e-9,
random_state=seed,
dissimilarity="precomputed",
n_jobs=1,
)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(
n_components=2,
metric=False,
max_iter=3000,
eps=1e-12,
dissimilarity="precomputed",
random_state=seed,
n_jobs=1,
n_init=1,
)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true**2).sum()) / np.sqrt((pos**2).sum())
npos *= np.sqrt((X_true**2).sum()) / np.sqrt((npos**2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0.0, 0.0, 1.0, 1.0])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color="navy", s=s, lw=0, label="True Position")
plt.scatter(pos[:, 0], pos[:, 1], color="turquoise", s=s, lw=0, label="MDS")
plt.scatter(npos[:, 0], npos[:, 1], color="darkorange", s=s, lw=0, label="NMDS")
plt.legend(scatterpoints=1, loc="best", shadow=False)
similarities = similarities.max() / (similarities + EPSILON) * 100
np.fill_diagonal(similarities, 0)
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [
[X_true[i, :], X_true[j, :]] for i in range(len(pos)) for j in range(len(pos))
]
values = np.abs(similarities)
lc = LineCollection(
segments, zorder=0, cmap=plt.cm.Blues, norm=plt.Normalize(0, values.max())
)
lc.set_array(similarities.flatten())
lc.set_linewidths(np.full(len(segments), 0.5))
ax.add_collection(lc)
plt.show()
|
import os
from functools import partial
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
"""Read contents of compressed file as a filesystem with one file inside."""
root_marker = ""
protocol: str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
compression: str = None # compression type in fsspec. ex: "gzip"
extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__(
self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
):
"""
The compressed file system can be instantiated from any compressed file.
It reads the contents of the compressed file as a filesystem with one file inside, as if it were an archive.
The single file inside the filesystem is named after the compressed file,
without the compression extension at the end of the filename.
Args:
fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
mode (:obj:``str``): Currently, only 'rb' accepted
target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
"""
super().__init__(self, **kwargs)
self.fo = fo.__fspath__() if hasattr(fo, "__fspath__") else fo
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
self._open_with_fsspec = partial(
fsspec.open,
self.fo,
mode="rb",
protocol=target_protocol,
compression=self.compression,
client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
},
**(target_options or {}),
)
self.compressed_name = os.path.basename(self.fo.split("::")[0])
self.uncompressed_name = (
self.compressed_name[: self.compressed_name.rindex(".")]
if "." in self.compressed_name
else self.compressed_name
)
self.dir_cache = None
@classmethod
def _strip_protocol(cls, path):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
if self.dir_cache is None:
f = {**self._open_with_fsspec().fs.info(self.fo), "name": self.uncompressed_name}
self.dir_cache = {f["name"]: f}
def cat(self, path: str):
with self._open_with_fsspec().open() as f:
return f.read()
def _open(
self,
path: str,
mode: str = "rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.fo} opened with mode 'rb'")
return self._open_with_fsspec().open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
"""Read contents of BZ2 file as a filesystem with one file inside."""
protocol = "bz2"
compression = "bz2"
extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
"""Read contents of GZIP file as a filesystem with one file inside."""
protocol = "gzip"
compression = "gzip"
extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
"""Read contents of LZ4 file as a filesystem with one file inside."""
protocol = "lz4"
compression = "lz4"
extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
"""Read contents of .xz (LZMA) file as a filesystem with one file inside."""
protocol = "xz"
compression = "xz"
extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
"""
Read contents of .zstd file as a filesystem with one file inside.
"""
protocol = "zstd"
compression = "zstd"
extension = ".zst"
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
"""Read contents of compressed file as a filesystem with one file inside."""
root_marker = ""
protocol: str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
compression: str = None # compression type in fsspec. ex: "gzip"
extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__(
self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
):
"""
The compressed file system can be instantiated from any compressed file.
It reads the contents of the compressed file as a filesystem with one file inside, as if it were an archive.
The single file inside the filesystem is named after the compressed file,
without the compression extension at the end of the filename.
Args:
fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
mode (:obj:``str``): Currently, only 'rb' accepted
target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
"""
super().__init__(self, **kwargs)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
self.file = fsspec.open(
fo,
mode="rb",
protocol=target_protocol,
compression=self.compression,
client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
},
**(target_options or {}),
)
self.compressed_name = os.path.basename(self.file.path.split("::")[0])
self.uncompressed_name = (
self.compressed_name[: self.compressed_name.rindex(".")]
if "." in self.compressed_name
else self.compressed_name
)
self.dir_cache = None
@classmethod
def _strip_protocol(cls, path):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
if self.dir_cache is None:
f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
self.dir_cache = {f["name"]: f}
def cat(self, path: str):
return self.file.open().read()
def _open(
self,
path: str,
mode: str = "rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
"""Read contents of BZ2 file as a filesystem with one file inside."""
protocol = "bz2"
compression = "bz2"
extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
"""Read contents of GZIP file as a filesystem with one file inside."""
protocol = "gzip"
compression = "gzip"
extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
"""Read contents of LZ4 file as a filesystem with one file inside."""
protocol = "lz4"
compression = "lz4"
extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
"""Read contents of .xz (LZMA) file as a filesystem with one file inside."""
protocol = "xz"
compression = "xz"
extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
"""
Read contents of .zstd file as a filesystem with one file inside.
"""
protocol = "zstd"
compression = "zstd"
extension = ".zst"
|
from pathlib import Path
from typing import List
import pytest
from flair_text import FlairTextEncoder
from jina import Document, DocumentArray, Executor
_EMBEDDING_DIM = 100
@pytest.fixture(scope='session')
def basic_encoder() -> FlairTextEncoder:
return FlairTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_no_document(basic_encoder: FlairTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: FlairTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: FlairTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_embeddings_str_error():
with pytest.raises(ValueError, match='embeddings'):
FlairTextEncoder(embeddings='word:glove')
def test_pooling_strategy_error():
with pytest.raises(ValueError, match='pooling_strategy'):
FlairTextEncoder(pooling_strategy='wrong')
def test_unknown_model_error():
with pytest.raises(ValueError, match='The model name wrong'):
FlairTextEncoder(embeddings=['wrong:glove'])
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'embeddings, dim',
(
(['flair:news-forward'], 2048),
(['flair:news-forward', 'flair:news-backward'], 4096),
(['word:glove', 'flair:news-backward'], 2148),
(['byte-pair:en'], 100),
),
)
def test_encoding_models(embeddings: List[str], dim: int):
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(embeddings=embeddings)
encoder.encode(docs, {})
assert docs[0].embedding.shape == (dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: FlairTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: FlairTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: FlairTextEncoder):
docs = DocumentArray(
[
Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.fixture(scope='session')
def basic_encoder() -> FlairTextEncoder:
return FlairTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_no_document(basic_encoder: FlairTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: FlairTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: FlairTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_embeddings_str_error():
with pytest.raises(ValueError, match='embeddings'):
FlairTextEncoder(embeddings='word:glove')
def test_pooling_strategy_error():
with pytest.raises(ValueError, match='pooling_strategy'):
FlairTextEncoder(pooling_strategy='wrong')
def test_unknown_model_error():
with pytest.raises(ValueError, match='The model name wrong'):
FlairTextEncoder(embeddings=['wrong:glove'])
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'embeddings, dim',
(
(['flair:news-forward'], 2048),
(['flair:news-forward', 'flair:news-backward'], 4096),
(['word:glove', 'flair:news-backward'], 2148),
(['byte-pair:en'], 100),
),
)
def test_encoding_models(embeddings: List[str], dim: int):
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(embeddings=embeddings)
encoder.encode(docs, {})
assert docs[0].embedding.shape == (dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: FlairTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: FlairTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: FlairTextEncoder):
docs = DocumentArray(
[
Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
# Copyright (c) OpenMMLab. All rights reserved.
third_part_libs = [
'pip install -r ../requirements/albu.txt',
'pip install instaboostfast',
'pip install git+https://github.com/cocodataset/panopticapi.git',
'pip install timm',
'pip install mmpretrain',
'pip install git+https://github.com/lvis-dataset/lvis-api.git',
'pip install -r ../requirements/multimodal.txt',
'pip install -r ../requirements/tracking.txt',
'pip install git+https://github.com/JonathonLuiten/TrackEval.git',
]
default_floating_range = 0.5
model_floating_ranges = {'atss/atss_r50_fpn_1x_coco.py': 0.3}
|
# Copyright (c) OpenMMLab. All rights reserved.
third_part_libs = [
'pip install -r ../requirements/albu.txt',
'pip install instaboostfast',
'pip install git+https://github.com/cocodataset/panopticapi.git',
'pip install timm',
'pip install mmcls>=1.0.0rc0',
'pip install git+https://github.com/lvis-dataset/lvis-api.git',
]
default_floating_range = 0.5
model_floating_ranges = {'atss/atss_r50_fpn_1x_coco.py': 0.3}
|
from typing import Optional
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDocument):
tensor: Optional[AnyTensor]
url: ImageUrl
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[Image], **kwargs) -> DocumentArray[Image]:
for doc in docs:
doc.tensor = doc.url.load()
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo',
inputs=DocumentArray[Image](
[Image(url='https://via.placeholder.com/150.png')]
),
return_type=DocumentArray[Image],
)
docs = docs.stack()
assert docs.tensor.ndim == 4
def test_send_custom_doc():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[MyDoc], **kwargs):
docs[0].text = 'hello world'
with Flow().add(uses=MyExec) as f:
doc = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert doc[0].text == 'hello world'
def test_input_response_schema():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(
on='/foo',
request_schema=DocumentArray[MyDoc],
response_schema=DocumentArray[MyDoc],
)
def foo(self, docs, **kwargs):
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_input_response_schema_annotation():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocumentArray[MyDoc], **kwargs) -> DocumentArray[MyDoc]:
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_different_output_input():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar',
inputs=InputDoc(img=Image(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
def test_deployments():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Deployment(uses=MyExec) as dep:
docs = dep.post(
on='/bar',
inputs=InputDoc(img=Image(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
|
from typing import Optional
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDocument):
tensor: Optional[AnyTensor]
url: ImageUrl
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[Image], **kwargs) -> DocumentArray[Image]:
for doc in docs:
doc.tensor = doc.url.load()
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo',
inputs=DocumentArray[Image](
[Image(url='https://via.placeholder.com/150.png')]
),
return_type=DocumentArray[Image],
)
docs = docs.stack()
assert docs.tensor.ndim == 4
def test_send_custom_doc():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[MyDoc], **kwargs):
docs[0].text = 'hello world'
with Flow().add(uses=MyExec) as f:
doc = f.post(on='/foo', inputs=MyDoc(text='hello'))
assert doc[0].text == 'hello world'
def test_input_response_schema():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(
on='/foo',
request_schema=DocumentArray[MyDoc],
response_schema=DocumentArray[MyDoc],
)
def foo(self, docs, **kwargs):
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_input_response_schema_annotation():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocumentArray[MyDoc], **kwargs) -> DocumentArray[MyDoc]:
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_different_output_input():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar',
inputs=InputDoc(img=Image(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
def test_deployments():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Deployment(uses=MyExec) as dep:
docs = dep.post(
on='/bar',
inputs=InputDoc(img=Image(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class VFNet(SingleStageDetector):
"""Implementation of `VarifocalNet
(VFNet).<https://arxiv.org/abs/2008.13367>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class VFNet(SingleStageDetector):
"""Implementation of `VarifocalNet
(VFNet).<https://arxiv.org/abs/2008.13367>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
import inspect
import re
import warnings
from operator import itemgetter
from typing import Optional, Tuple, List
from jina import Document
def get_properties(cls) -> List[Tuple[str, Optional[str], Optional[str]]]:
src = inspect.getsource(cls)
members = dict(inspect.getmembers(cls))
setters = re.findall(
r'@[a-zA-Z0-9_]+\.setter\s+def\s+([a-zA-Z0-9_]+)\s*\(self,\s*[a-zA-Z0-9_]+\s*:\s*(.*?)\)',
src,
flags=re.DOTALL,
)
property_docs = []
for setter, _ in setters:
if setter not in members:
warnings.warn(
f'{setter} is found as a setter but there is no corresponding getter'
)
property_docs.append(None)
else:
doc = inspect.getdoc(members[setter])
description = next(iter(re.findall(':return:(.*)', doc)), None)
if description:
description = description.strip()
property_docs.append(description)
return sorted(
list(
zip(map(itemgetter(0), setters), map(itemgetter(1), setters), property_docs)
),
key=lambda x: x[0],
)
def get_overload_signature(
properties,
indent=' ' * 4,
):
kwargs = [
f'{indent}{indent}{property_name}: Optional[{type_hint}] = None'
for property_name, type_hint, _ in properties
]
args_str = ', \n'.join(kwargs + [f'{indent}{indent}**kwargs'])
doc_str = '\n'.join(
[
f'{indent}{indent}:param {property_name}: {description}'
for property_name, _, description in properties
]
+ [
f'{indent}{indent}:param kwargs: other parameters to be set _after_ the document is constructed'
]
)
signature = f'def __init__(\n{indent}{indent}self,\n{args_str}\n{indent}):'
final_str = f'@overload\n{indent}{signature}\n{indent}{indent}"""\n{doc_str}\n{indent}{indent}"""'
return final_str
def write_signature(
cls,
overload_signature,
tag,
indent=' ' * 4,
):
filepath = inspect.getfile(cls)
final_code = re.sub(
rf'(# overload_inject_start_{tag}).*(# overload_inject_end_{tag})',
f'\\1\n{indent}{overload_signature}\n{indent}\\2',
open(filepath).read(),
0,
re.DOTALL,
)
with open(filepath, 'w', encoding='utf-8') as fp:
fp.write(final_code)
def inject_properties_as_overload(cls):
properties = get_properties(cls)
overload_signature = get_overload_signature(properties)
write_signature(cls, overload_signature, 'document')
print(inspect.getfile(cls))
if __name__ == '__main__':
inject_properties_as_overload(Document)
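# --- Illustrative addendum (not part of the source) --------------------------
# When run in the same module, get_properties() records, for every @x.setter
# whose value parameter is annotated, the property name, the annotation and the
# description on the getter's ":return:" line. The Toy class is made up.
class Toy:
    @property
    def text(self):
        """:return: the text payload"""
        return self._text

    @text.setter
    def text(self, value: str):
        self._text = value

print(get_properties(Toy))  # roughly: [('text', 'str', 'the text payload')]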
|
import inspect
import re
import warnings
from operator import itemgetter
from typing import Optional, Tuple, List
from jina import Document
def get_properties(cls) -> List[Tuple[str, Optional[str], Optional[str]]]:
src = inspect.getsource(cls)
members = dict(inspect.getmembers(cls))
setters = re.findall(
r'@[a-zA-Z0-9_]+\.setter\s+def\s+([a-zA-Z0-9_]+)\s*\(self,\s*[a-zA-Z0-9_]+\s*:\s*(.*?)\)',
src,
flags=re.DOTALL,
)
property_docs = []
for setter, _ in setters:
if setter not in members:
warnings.warn(
f'{setter} is found as a setter but there is no corresponding getter'
)
property_docs.append(None)
else:
doc = inspect.getdoc(members[setter])
description = next(iter(re.findall(':return:(.*)', doc)), None)
if description:
description = description.strip()
property_docs.append(description)
return sorted(
list(
zip(map(itemgetter(0), setters), map(itemgetter(1), setters), property_docs)
),
key=lambda x: x[0],
)
def get_overload_signature(
properties,
indent=' ' * 4,
):
kwargs = [
f'{indent}{indent}{property_name}: Optional[{type_hint}] = None'
for property_name, type_hint, _ in properties
]
args_str = ', \n'.join(kwargs + [f'{indent}{indent}**kwargs'])
doc_str = '\n'.join(
[
f'{indent}{indent}:param {property_name}: {description}'
for property_name, _, description in properties
]
+ [
f'{indent}{indent}:param kwargs: other parameters to be set _after_ the document is constructed'
]
)
signature = f'def __init__(\n{indent}{indent}self,\n{args_str}\n{indent}):'
final_str = f'@overload\n{indent}{signature}\n{indent}{indent}"""\n{doc_str}\n{indent}{indent}"""'
return final_str
def write_signature(
cls,
overload_signature,
tag,
indent=' ' * 4,
):
filepath = inspect.getfile(cls)
final_code = re.sub(
rf'(# overload_inject_start_{tag}).*(# overload_inject_end_{tag})',
f'\\1\n{indent}{overload_signature}\n{indent}\\2',
open(filepath).read(),
0,
re.DOTALL,
)
with open(filepath, 'w') as fp:
fp.write(final_code)
def inject_properties_as_overload(cls):
properties = get_properties(cls)
overload_signature = get_overload_signature(properties)
write_signature(cls, overload_signature, 'document')
print(inspect.getfile(cls))
if __name__ == '__main__':
inject_properties_as_overload(Document)
|
"""LLM Chain for generating examples for question answering."""
from __future__ import annotations
from typing import Any
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseLLMOutputParser
from pydantic import Field
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.generate_prompt import PROMPT
from langchain.output_parsers.regex import RegexParser
_QA_OUTPUT_PARSER = RegexParser(
regex=r"QUESTION: (.*?)\n+ANSWER: (.*)",
output_keys=["query", "answer"],
)
class QAGenerateChain(LLMChain):
"""LLM Chain for generating examples for question answering."""
output_parser: BaseLLMOutputParser = Field(default=_QA_OUTPUT_PARSER)
output_key: str = "qa_pairs"
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@classmethod
def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
"""Load QA Generate Chain from LLM."""
return cls(llm=llm, prompt=PROMPT, **kwargs)
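# --- Illustrative sketch (assumptions: the chain's prompt takes a "doc" input
# variable, and FakeListLLM from langchain_community is available so the example
# runs without an API key; neither detail comes from this file) ---------------
from langchain_community.llms.fake import FakeListLLM

fake_llm = FakeListLLM(responses=["QUESTION: Who wrote the report?\nANSWER: Ada."])
chain = QAGenerateChain.from_llm(fake_llm)
result = chain.invoke({"doc": "Ada wrote the quarterly report."})
print(result["qa_pairs"])  # regex-parsed into {"query": ..., "answer": ...}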
|
"""LLM Chain for generating examples for question answering."""
from __future__ import annotations
from typing import Any
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseLLMOutputParser
from pydantic import Field
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.generate_prompt import PROMPT
from langchain.output_parsers.regex import RegexParser
_QA_OUTPUT_PARSER = RegexParser(
regex=r"QUESTION: (.*?)\n+ANSWER: (.*)", output_keys=["query", "answer"]
)
class QAGenerateChain(LLMChain):
"""LLM Chain for generating examples for question answering."""
output_parser: BaseLLMOutputParser = Field(default=_QA_OUTPUT_PARSER)
output_key: str = "qa_pairs"
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@classmethod
def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
"""Load QA Generate Chain from LLM."""
return cls(llm=llm, prompt=PROMPT, **kwargs)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity: Active Dimensions: 63.1, Sparsity Ratio: 0.9979
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity Stats: Row Non-Zero Mean: 63.13884735107422, Row Sparsity Mean: 0.9979313611984253
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
"""Test Base Schema of documents."""
from collections.abc import Iterator
from langchain_core.document_loaders import BaseBlobParser, Blob
from langchain_core.documents import Document
def test_base_blob_parser() -> None:
"""Verify that the eager method is hooked up to the lazy method by default."""
class MyParser(BaseBlobParser):
"""A simple parser that returns a single document."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazy parsing interface."""
yield Document(
page_content="foo",
)
parser = MyParser()
assert isinstance(parser.lazy_parse(Blob(data="who?")), Iterator)
# We're verifying that the eager method is hooked up to the lazy method by default.
docs = parser.parse(Blob(data="who?"))
assert len(docs) == 1
assert docs[0].page_content == "foo"
|
"""Test Base Schema of documents."""
from typing import Iterator
from langchain_core.document_loaders import BaseBlobParser, Blob
from langchain_core.documents import Document
def test_base_blob_parser() -> None:
"""Verify that the eager method is hooked up to the lazy method by default."""
class MyParser(BaseBlobParser):
"""A simple parser that returns a single document."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazy parsing interface."""
yield Document(
page_content="foo",
)
parser = MyParser()
assert isinstance(parser.lazy_parse(Blob(data="who?")), Iterator)
# We're verifying that the eager method is hooked up to the lazy method by default.
docs = parser.parse(Blob(data="who?"))
assert len(docs) == 1
assert docs[0].page_content == "foo"
|
import random
import asyncio
import time
import aiohttp
import grpc
def _raise_last_attempt(err, attempt):
if isinstance(err, asyncio.CancelledError):
trailing_metadata = grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
code=grpc.StatusCode.CANCELLED,
initial_metadata=grpc.aio.Metadata(),
trailing_metadata=trailing_metadata,
)
elif isinstance(err, grpc.aio.AioRpcError):
trailing_metadata = err.trailing_metadata() or grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
code=err.code(),
details=err.details(),
initial_metadata=err.initial_metadata(),
trailing_metadata=trailing_metadata,
debug_error_string=err.debug_error_string(),
)
elif isinstance(err, aiohttp.ClientConnectorCertificateError):
raise err
elif isinstance(err, aiohttp.ClientError):
raise ConnectionError(str(err))
else:
raise err
def sync_wait_or_raise_err(attempt: int,
err: Exception,
max_attempts: float,
backoff_multiplier: float,
initial_backoff: float,
max_backoff: float,
):
"""
    Accepts retry parameters and the underlying error. The error is raised if ``max_attempts`` has been reached;
    otherwise the method waits based on the backoff calculations.
:param attempt: Number of the current attempt.
:param err: Underlying error that was raised by the operation.
:param max_attempts: Maximum number of attempts that are allowed.
:param backoff_multiplier: Factor that will be raised to the exponent of (attempt - 1) for calculating the backoff wait time.
:param initial_backoff: The backoff time on the first error. This will be multiplied by the backoff_multiplier exponent for subsequent wait time calculations.
:param max_backoff: The maximum backoff wait time.
"""
if attempt == max_attempts:
_raise_last_attempt(err, attempt)
else:
time.sleep(_wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff))
async def wait_or_raise_err(
attempt: int,
err: Exception,
max_attempts: float,
backoff_multiplier: float,
initial_backoff: float,
max_backoff: float,
):
"""
    Accepts retry parameters and the underlying error. The error is raised if ``max_attempts`` has been reached;
    otherwise the method waits based on the backoff calculations.
:param attempt: Number of the current attempt.
:param err: Underlying error that was raised by the operation.
:param max_attempts: Maximum number of attempts that are allowed.
:param backoff_multiplier: Factor that will be raised to the exponent of (attempt - 1) for calculating the backoff wait time.
:param initial_backoff: The backoff time on the first error. This will be multiplied by the backoff_multiplier exponent for subsequent wait time calculations.
:param max_backoff: The maximum backoff wait time.
"""
if attempt == max_attempts:
_raise_last_attempt(err, attempt)
else:
await asyncio.sleep(_wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff))
def _wait_time(attempt, backoff_multiplier, initial_backoff, max_backoff):
if attempt == 1:
wait_time = initial_backoff
else:
wait_time = random.uniform(
0,
min(initial_backoff * backoff_multiplier ** (attempt - 1), max_backoff),
)
return wait_time
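# Hedged usage sketch: a synchronous retry loop built on the helpers above.
# `do_request` and the backoff values are illustrative placeholders.
def _call_with_retries(do_request, max_attempts: int = 3):
    for attempt in range(1, max_attempts + 1):
        try:
            return do_request()
        except Exception as err:
            # On the final attempt the error is re-raised; otherwise the helper
            # sleeps for min(initial_backoff * backoff_multiplier ** (attempt - 1),
            # max_backoff), jittered uniformly after the first retry.
            sync_wait_or_raise_err(
                attempt=attempt,
                err=err,
                max_attempts=max_attempts,
                backoff_multiplier=2.0,
                initial_backoff=0.5,
                max_backoff=10.0,
            )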
|
import asyncio
import random
import aiohttp
import grpc
async def wait_or_raise_err(
attempt: int,
err: Exception,
max_attempts: float,
backoff_multiplier: float,
initial_backoff: float,
max_backoff: float,
):
"""
    Accepts retry parameters and the underlying error. The error is raised if ``max_attempts`` has been reached;
    otherwise the method waits based on the backoff calculations.
:param attempt: Number of the current attempt.
:param err: Underlying error that was raised by the operation.
:param max_attempts: Maximum number of attempts that are allowed.
:param backoff_multiplier: Factor that will be raised to the exponent of (attempt - 1) for calculating the backoff wait time.
:param initial_backoff: The backoff time on the first error. This will be multiplied by the backoff_multiplier exponent for subsequent wait time calculations.
:param max_backoff: The maximum backoff wait time.
"""
if attempt == max_attempts:
if isinstance(err, asyncio.CancelledError):
trailing_metadata = grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
code=grpc.StatusCode.CANCELLED,
initial_metadata=grpc.aio.Metadata(),
trailing_metadata=trailing_metadata,
)
elif isinstance(err, grpc.aio.AioRpcError):
trailing_metadata = err.trailing_metadata() or grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
code=err.code(),
details=err.details(),
initial_metadata=err.initial_metadata(),
trailing_metadata=trailing_metadata,
debug_error_string=err.debug_error_string(),
)
elif isinstance(err, aiohttp.ClientConnectorCertificateError):
raise err
elif isinstance(err, aiohttp.ClientError):
raise ConnectionError(str(err))
else:
raise err
else:
await _backoff_wait(attempt, backoff_multiplier, initial_backoff, max_backoff)
async def _backoff_wait(attempt, backoff_multiplier, initial_backoff, max_backoff):
if attempt == 1:
wait_time = initial_backoff
else:
wait_time = random.uniform(
0,
min(initial_backoff * backoff_multiplier ** (attempt - 1), max_backoff),
)
await asyncio.sleep(wait_time)
|
"""Module to test base parser implementations."""
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseTransformOutputParser,
)
from langchain_core.outputs import ChatGeneration, Generation
def test_base_generation_parser() -> None:
"""Test Base Generation Output Parser."""
class StrInvertCase(BaseGenerationOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
@override
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                Many parsers assume that only a single generation is passed to
                them; we assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
StrInvertCase.model_rebuild()
model = GenericFakeChatModel(messages=iter([AIMessage(content="hEllo")]))
chain = model | StrInvertCase()
assert chain.invoke("") == "HeLLO"
def test_base_transform_output_parser() -> None:
"""Test base transform output parser."""
class StrInvertCase(BaseTransformOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse(self, text: str) -> str:
"""Parse a single string into a specific format."""
raise NotImplementedError
@override
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                Many parsers assume that only a single generation is passed to
                them; we assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
chain = model | StrInvertCase()
# inputs to models are ignored, response is hard-coded in model definition
chunks = list(chain.stream(""))
assert chunks == ["HELLO", " ", "WORLD"]
|
"""Module to test base parser implementations."""
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseTransformOutputParser,
)
from langchain_core.outputs import ChatGeneration, Generation
def test_base_generation_parser() -> None:
"""Test Base Generation Output Parser."""
class StrInvertCase(BaseGenerationOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                Many parsers assume that only a single generation is passed to
                them; we assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
StrInvertCase.model_rebuild()
model = GenericFakeChatModel(messages=iter([AIMessage(content="hEllo")]))
chain = model | StrInvertCase()
assert chain.invoke("") == "HeLLO"
def test_base_transform_output_parser() -> None:
"""Test base transform output parser."""
class StrInvertCase(BaseTransformOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse(self, text: str) -> str:
"""Parse a single string into a specific format."""
raise NotImplementedError
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                Many parsers assume that only a single generation is passed to
                them; we assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
chain = model | StrInvertCase()
# inputs to models are ignored, response is hard-coded in model definition
chunks = list(chain.stream(""))
assert chunks == ["HELLO", " ", "WORLD"]
|
from llama_index.core.indices.managed.base import BaseManagedIndex
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.indices.managed.vertexai import VertexAIIndex
from llama_index.indices.managed.vertexai import VertexAIRetriever
def test_class():
names_of_base_classes = [b.__name__ for b in VertexAIIndex.__mro__]
assert BaseManagedIndex.__name__ in names_of_base_classes
names_of_base_classes = [b.__name__ for b in VertexAIRetriever.__mro__]
assert BaseRetriever.__name__ in names_of_base_classes
|
from llama_index.core.indices.managed.base import BaseManagedIndex
from llama_index.indices.managed.vertexai import VertexAIIndex
def test_class():
names_of_base_classes = [b.__name__ for b in VertexAIIndex.__mro__]
assert BaseManagedIndex.__name__ in names_of_base_classes
|
from typing import Optional
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
SpinnerColumn,
Text,
TextColumn,
TimeElapsedColumn,
TimeRemainingColumn,
)
class QPSColumn(TextColumn):
def render(self, task) -> Text:
if task.speed:
_text = f'{task.speed:.0f} QPS'
else:
_text = 'unknown'
if self.markup:
text = Text.from_markup(_text, style=self.style, justify=self.justify)
else:
text = Text(_text, style=self.style, justify=self.justify)
if self.highlighter:
self.highlighter.highlight(text)
return text
def _get_pbar(disable: bool, total: Optional[int] = None):
columns = (
SpinnerColumn(),
TextColumn('[bold]{task.description}'),
BarColumn(),
MofNCompleteColumn(),
'•',
QPSColumn('{task.speed} QPS', justify='right', style='progress.data.speed'),
'•',
TimeRemainingColumn() if total else TimeElapsedColumn(),
'•',
TextColumn(
'[bold blue]{task.fields[total_size]}',
justify='right',
style='progress.filesize',
),
)
return Progress(
*columns,
transient=False,
disable=disable,
)
def _get_progressbar(description: str, disable: bool, total: Optional[int]):
progress = _get_pbar(disable, total)
task = progress.add_task(description, total=total, start=False, total_size=0)
return progress, task
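# Hedged usage sketch: driving the helpers above with the standard rich API.
# The description and item count are illustrative.
if __name__ == '__main__':
    import time

    progress, task = _get_progressbar('Working', disable=False, total=50)
    with progress:
        progress.start_task(task)
        for i in range(50):
            time.sleep(0.02)
            # `total_size` is the custom task field rendered by the last column.
            progress.update(task, advance=1, total_size=f'{i + 1} items')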
|
from rich.progress import (
Progress,
BarColumn,
SpinnerColumn,
MofNCompleteColumn,
TextColumn,
TimeRemainingColumn,
Text,
)
class QPSColumn(TextColumn):
def render(self, task) -> Text:
if task.speed:
_text = f'{task.speed:.0f} QPS'
else:
_text = 'unknown'
if self.markup:
text = Text.from_markup(_text, style=self.style, justify=self.justify)
else:
text = Text(_text, style=self.style, justify=self.justify)
if self.highlighter:
self.highlighter.highlight(text)
return text
def _get_pbar(disable):
return Progress(
SpinnerColumn(),
TextColumn('[bold]{task.description}'),
BarColumn(),
MofNCompleteColumn(),
'•',
QPSColumn('{task.speed} QPS', justify='right', style='progress.data.speed'),
'•',
TimeRemainingColumn(),
'•',
TextColumn(
'[bold blue]{task.fields[total_size]}',
justify='right',
style='progress.filesize',
),
transient=False,
disable=disable,
)
def _get_progressbar(description, disable, total):
progress = _get_pbar(disable)
task = progress.add_task(description, total=total, start=False, total_size=0)
return progress, task
|
from __future__ import annotations
import difflib
from pathlib import Path
import pytest
from typer.testing import CliRunner
from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_runner.folder import Folder
pytest.importorskip("gritql")
def find_issue(current: Folder, expected: Folder) -> str:
for current_file, expected_file in zip(current.files, expected.files):
if current_file != expected_file:
if current_file.name != expected_file.name:
return (
f"Files have "
f"different names: {current_file.name} != {expected_file.name}"
)
if isinstance(current_file, Folder) and isinstance(expected_file, Folder):
return find_issue(current_file, expected_file)
if isinstance(current_file, Folder) or isinstance(expected_file, Folder):
return (
f"One of the files is a "
f"folder: {current_file.name} != {expected_file.name}"
)
return "\n".join(
difflib.unified_diff(
current_file.content.splitlines(),
expected_file.content.splitlines(),
fromfile=current_file.name,
tofile=expected_file.name,
)
)
return "Unknown"
@pytest.mark.xfail(reason="grit may not be installed in env")
def test_command_line(tmp_path: Path) -> None:
runner = CliRunner()
with runner.isolated_filesystem(temp_dir=tmp_path) as td:
before.create_structure(root=Path(td))
# The input is used to force through the confirmation.
result = runner.invoke(app, ["migrate", before.name, "--force"])
assert result.exit_code == 0, result.output
after = Folder.from_structure(Path(td) / before.name)
assert after == expected, find_issue(after, expected)
|
# ruff: noqa: E402
from __future__ import annotations
import pytest
pytest.importorskip("gritql")
import difflib
from pathlib import Path
from typer.testing import CliRunner
from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_runner.folder import Folder
def find_issue(current: Folder, expected: Folder) -> str:
for current_file, expected_file in zip(current.files, expected.files):
if current_file != expected_file:
if current_file.name != expected_file.name:
return (
f"Files have "
f"different names: {current_file.name} != {expected_file.name}"
)
if isinstance(current_file, Folder) and isinstance(expected_file, Folder):
return find_issue(current_file, expected_file)
elif isinstance(current_file, Folder) or isinstance(expected_file, Folder):
return (
f"One of the files is a "
f"folder: {current_file.name} != {expected_file.name}"
)
return "\n".join(
difflib.unified_diff(
current_file.content.splitlines(),
expected_file.content.splitlines(),
fromfile=current_file.name,
tofile=expected_file.name,
)
)
return "Unknown"
@pytest.mark.xfail(reason="grit may not be installed in env")
def test_command_line(tmp_path: Path) -> None:
runner = CliRunner()
with runner.isolated_filesystem(temp_dir=tmp_path) as td:
before.create_structure(root=Path(td))
# The input is used to force through the confirmation.
result = runner.invoke(app, ["migrate", before.name, "--force"])
assert result.exit_code == 0, result.output
after = Folder.from_structure(Path(td) / before.name)
assert after == expected, find_issue(after, expected)
|
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = "convert_color_space(..., color_space=features.ColorSpace.GRAY)"
if num_output_channels == 3:
replacement = f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB)"
warnings.warn(
f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
def rgb_to_grayscale(
inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT], num_output_channels: int = 1
) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
inpt = inpt.as_subclass(torch.Tensor)
old_color_space = None
elif isinstance(inpt, torch.Tensor):
old_color_space = features._image._from_tensor_shape(inpt.shape) # type: ignore[arg-type]
else:
old_color_space = None
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = (
f"convert_color_space(..., color_space=features.ColorSpace.GRAY"
f"{f', old_color_space=features.ColorSpace.{old_color_space}' if old_color_space is not None else ''})"
)
if num_output_channels == 3:
replacement = (
f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB"
f"{f', old_color_space=features.ColorSpace.GRAY' if old_color_space is not None else ''})"
)
warnings.warn(
f"The function `rgb_to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.rgb_to_grayscale(inpt, num_output_channels=num_output_channels)
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = "convert_color_space(..., color_space=features.ColorSpace.GRAY)"
if num_output_channels == 3:
replacement = f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB)"
warnings.warn(
f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
def rgb_to_grayscale(
inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1
) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]:
if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
inpt = inpt.as_subclass(torch.Tensor)
old_color_space = None
elif isinstance(inpt, torch.Tensor):
old_color_space = features._image._from_tensor_shape(inpt.shape) # type: ignore[arg-type]
else:
old_color_space = None
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = (
f"convert_color_space(..., color_space=features.ColorSpace.GRAY"
f"{f', old_color_space=features.ColorSpace.{old_color_space}' if old_color_space is not None else ''})"
)
if num_output_channels == 3:
replacement = (
f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB"
f"{f', old_color_space=features.ColorSpace.GRAY' if old_color_space is not None else ''})"
)
warnings.warn(
f"The function `rgb_to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.rgb_to_grayscale(inpt, num_output_channels=num_output_channels)
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
__version__ = '0.17.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.16.6'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, FUNCTIONS, HOOKS,
INFERENCERS, LOG_PROCESSORS, LOOPS, METRICS, MODEL_WRAPPERS,
MODELS, OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
STRATEGIES, TASK_UTILS, TRANSFORMS, VISBACKENDS,
VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import (count_registered_modules, init_default_scope,
traverse_registry_tree)
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR', 'INFERENCERS',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg',
'build_scheduler_from_cfg', 'init_default_scope', 'FUNCTIONS', 'STRATEGIES'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, FUNCTIONS, HOOKS,
INFERENCERS, LOG_PROCESSORS, LOOPS, METRICS, MODEL_WRAPPERS,
MODELS, OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
TASK_UTILS, TRANSFORMS, VISBACKENDS, VISUALIZERS,
WEIGHT_INITIALIZERS)
from .utils import (count_registered_modules, init_default_scope,
traverse_registry_tree)
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR', 'INFERENCERS',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg',
'build_scheduler_from_cfg', 'init_default_scope', 'FUNCTIONS'
]
|
import requests
from packaging import version
from typing import Sequence, Union, List, Optional
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
)
from tgi.types import (
Message,
)
def resolve_tgi_function_call(url: str) -> bool:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version")
if version.parse(tgi_version) >= version.parse("2.0.1"):
return True
else:
        raise ValueError(
            "'text-generation-inference' version "
            f"incompatible with function call: {tgi_version}. "
            "Function call support was added in v2.0.1"
        )
def get_max_input_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version")
if version.parse(tgi_version) >= version.parse("2.1.0"):
return model_info.get("max_input_tokens")
else:
return model_info.get("max_input_length")
def get_max_total_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("max_total_tokens")
def get_model_name(url: str) -> Union[str, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("model_id")
def to_tgi_messages(messages: Sequence[ChatMessage]) -> Sequence[Message]:
out_messages = []
for m in messages:
tool_calls = m.additional_kwargs.get("tool_calls")
out_messages.append(
Message(role=m.role.value, content=m.content, tool_calls=tool_calls)
)
return out_messages
def force_single_tool_call(response: ChatResponse) -> None:
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
def resolve_tool_choice(
tools: Optional[List[dict]] = None, tool_choice: str = "none"
) -> Union[str, dict]:
"""
Resolve tool choice.
Check if tool_name exists in tools.
    Note that unlike the OpenAI specification, 'auto' will ALWAYS choose a tool for you.
    Set to 'none' explicitly if you do not wish to use a tool.
"""
valid_tool_choices = ["none", "auto"] + [t["function"]["name"] for t in tools or []]
if tool_choice not in valid_tool_choices:
raise ValueError(
f"{tool_choice} is not a valid tool_choice. Must be one of {valid_tool_choices}"
)
return tool_choice
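# Hedged usage sketch: summarising a running TGI server via the helpers above.
# The default URL is a placeholder for wherever text-generation-inference is served.
def _describe_tgi_server(url: str = "http://localhost:8080") -> dict:
    return {
        "model_id": get_model_name(url),
        "max_input_tokens": get_max_input_tokens(url),
        "max_total_tokens": get_max_total_tokens(url),
        # Raises ValueError when the server predates function-call support (v2.0.1).
        "supports_function_call": resolve_tgi_function_call(url),
    }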
|
import requests
from packaging import version
from typing import Sequence, Union, List, Optional
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
)
from tgi.types import (
Message,
)
def resolve_tgi_function_call(url: str) -> bool:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.0.1"):
return True
else:
        raise ValueError(
            "'text-generation-inference' version "
            f"incompatible with function call: {tgi_version}. "
            "Function call support was added in v2.0.1"
        )
def get_max_input_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.1.0"):
return model_info.get("max_input_tokens", None)
else:
return model_info.get("max_input_length", None)
def get_max_total_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("max_total_tokens", None)
def get_model_name(url: str) -> Union[str, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("model_id", None)
def to_tgi_messages(messages: Sequence[ChatMessage]) -> Sequence[Message]:
out_messages = []
for m in messages:
tool_calls = m.additional_kwargs.get("tool_calls")
out_messages.append(
Message(role=m.role.value, content=m.content, tool_calls=tool_calls)
)
return out_messages
def force_single_tool_call(response: ChatResponse) -> None:
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
def resolve_tool_choice(
tools: Optional[List[dict]] = None, tool_choice: str = "none"
) -> Union[str, dict]:
"""Resolve tool choice.
Check if tool_name exists in tools.
Note that unlike in OpenAI specification, 'auto' will ALWAYS choose the tool for you.
Set to 'none' explicitly if do not wish to use tool.
"""
valid_tool_choices = ["none", "auto"] + [t["function"]["name"] for t in tools or []]
if tool_choice not in valid_tool_choices:
raise ValueError(
f"{tool_choice} is not a valid tool_choice. Must be one of {valid_tool_choices}"
)
return tool_choice
|
from __future__ import annotations
import concurrent.futures
from pathlib import Path
from typing import Iterator, Literal, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import (
BlobLoader,
FileSystemBlobLoader,
)
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.registry import get_parser
_PathLike = Union[str, Path]
DEFAULT = Literal["default"]
class ConcurrentLoader(GenericLoader):
"""Load and pars Documents concurrently."""
def __init__(
self,
blob_loader: BlobLoader,
blob_parser: BaseBlobParser,
num_workers: int = 4,
) -> None:
super().__init__(blob_loader, blob_parser)
self.num_workers = num_workers
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily with concurrent parsing."""
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.num_workers
) as executor:
futures = {
executor.submit(self.blob_parser.lazy_parse, blob)
for blob in self.blob_loader.yield_blobs()
}
for future in concurrent.futures.as_completed(futures):
yield from future.result()
@classmethod
def from_filesystem(
cls,
path: _PathLike,
*,
glob: str = "**/[!.]*",
exclude: Sequence[str] = (),
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
parser: Union[DEFAULT, BaseBlobParser] = "default",
num_workers: int = 4,
parser_kwargs: Optional[dict] = None,
) -> ConcurrentLoader:
"""Create a concurrent generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
exclude: A list of patterns to exclude from the loader.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
num_workers: Max number of concurrent workers to use.
parser_kwargs: Keyword arguments to pass to the parser.
"""
blob_loader = FileSystemBlobLoader(
path,
glob=glob,
exclude=exclude,
suffixes=suffixes,
show_progress=show_progress,
)
if isinstance(parser, str):
if parser == "default" and cls.get_parser != GenericLoader.get_parser:
# There is an implementation of get_parser on the class, use it.
blob_parser = cls.get_parser(**(parser_kwargs or {}))
else:
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser, num_workers=num_workers)
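# Hedged usage sketch: loading plain-text files concurrently with the default parser.
# The path and glob pattern are illustrative.
if __name__ == "__main__":
    loader = ConcurrentLoader.from_filesystem(
        "./docs",
        glob="**/*.txt",
        num_workers=4,
    )
    for doc in loader.lazy_load():
        print(doc.metadata.get("source"), len(doc.page_content))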
|
from __future__ import annotations
import concurrent.futures
from pathlib import Path
from typing import Iterator, Literal, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import (
BlobLoader,
FileSystemBlobLoader,
)
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.registry import get_parser
_PathLike = Union[str, Path]
DEFAULT = Literal["default"]
class ConcurrentLoader(GenericLoader):
"""Load and pars Documents concurrently."""
def __init__(
self,
blob_loader: BlobLoader, # type: ignore[valid-type]
blob_parser: BaseBlobParser,
num_workers: int = 4, # type: ignore[valid-type]
) -> None:
super().__init__(blob_loader, blob_parser)
self.num_workers = num_workers
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily with concurrent parsing."""
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.num_workers
) as executor:
futures = {
executor.submit(self.blob_parser.lazy_parse, blob)
for blob in self.blob_loader.yield_blobs() # type: ignore[attr-defined]
}
for future in concurrent.futures.as_completed(futures):
yield from future.result()
@classmethod
def from_filesystem(
cls,
path: _PathLike,
*,
glob: str = "**/[!.]*",
exclude: Sequence[str] = (),
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
parser: Union[DEFAULT, BaseBlobParser] = "default",
num_workers: int = 4,
parser_kwargs: Optional[dict] = None,
) -> ConcurrentLoader:
"""Create a concurrent generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
exclude: A list of patterns to exclude from the loader.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
num_workers: Max number of concurrent workers to use.
parser_kwargs: Keyword arguments to pass to the parser.
"""
blob_loader = FileSystemBlobLoader( # type: ignore[attr-defined, misc]
path,
glob=glob,
exclude=exclude,
suffixes=suffixes,
show_progress=show_progress,
)
if isinstance(parser, str):
if parser == "default" and cls.get_parser != GenericLoader.get_parser:
# There is an implementation of get_parser on the class, use it.
blob_parser = cls.get_parser(**(parser_kwargs or {}))
else:
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser, num_workers=num_workers)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
from mmcv.transforms import Compose
from torchvision.transforms import functional as F
from mmdet.apis import init_detector
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='Display interval in seconds; 0 blocks until a key press')
args = parser.parse_args()
return args
def prefetch_img_metas(cfg, ori_wh):
w, h = ori_wh
cfg.data.test.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.data.test.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8)}
data = test_pipeline(data)
img_metas = data['img_metas'][0].data
return img_metas
def process_img(frame_resize, img_metas, device):
assert frame_resize.shape == img_metas['pad_shape']
frame_cuda = torch.from_numpy(frame_resize).to(device).float()
frame_cuda = frame_cuda.permute(2, 0, 1) # HWC to CHW
mean = torch.from_numpy(img_metas['img_norm_cfg']['mean']).to(device)
std = torch.from_numpy(img_metas['img_norm_cfg']['std']).to(device)
frame_cuda = F.normalize(frame_cuda, mean=mean, std=std, inplace=True)
frame_cuda = frame_cuda[None, :, :, :] # NCHW
data = {'img': [frame_cuda], 'img_metas': [[img_metas]]}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
img_metas = prefetch_img_metas(model.cfg,
(video_origin.width, video_origin.height))
resize_wh = img_metas['pad_shape'][1::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft',
pix_fmt='rgb24')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for frame_resize, frame_origin in zip(
mmcv.track_iter_progress(video_resize), video_origin):
data = process_img(frame_resize, img_metas, args.device)
result = model(return_loss=False, rescale=True, **data)[0]
frame_mask = model.show_result(
frame_origin, result, score_thr=args.score_thr)
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
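# Hedged example invocation (file names and paths are illustrative):
#   python video_gpuaccel_demo.py demo.mp4 configs/my_config.py checkpoint.pth \
#       --device cuda:0 --score-thr 0.3 --out result.mp4 --nvdecode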
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
from torchvision.transforms import functional as F
from mmdet.apis import init_detector
from mmdet.datasets.pipelines import Compose
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='Display interval in seconds; 0 blocks until a key press')
args = parser.parse_args()
return args
def prefetch_img_metas(cfg, ori_wh):
w, h = ori_wh
cfg.data.test.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.data.test.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8)}
data = test_pipeline(data)
img_metas = data['img_metas'][0].data
return img_metas
def process_img(frame_resize, img_metas, device):
assert frame_resize.shape == img_metas['pad_shape']
frame_cuda = torch.from_numpy(frame_resize).to(device).float()
frame_cuda = frame_cuda.permute(2, 0, 1) # HWC to CHW
mean = torch.from_numpy(img_metas['img_norm_cfg']['mean']).to(device)
std = torch.from_numpy(img_metas['img_norm_cfg']['std']).to(device)
frame_cuda = F.normalize(frame_cuda, mean=mean, std=std, inplace=True)
frame_cuda = frame_cuda[None, :, :, :] # NCHW
data = {'img': [frame_cuda], 'img_metas': [[img_metas]]}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
img_metas = prefetch_img_metas(model.cfg,
(video_origin.width, video_origin.height))
resize_wh = img_metas['pad_shape'][1::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft',
pix_fmt='rgb24')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for frame_resize, frame_origin in zip(
mmcv.track_iter_progress(video_resize), video_origin):
data = process_img(frame_resize, img_metas, args.device)
result = model(return_loss=False, rescale=True, **data)[0]
frame_mask = model.show_result(
frame_origin, result, score_thr=args.score_thr)
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.2.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.2.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
], ))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
], ))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from pathlib import Path
from typing import Union, Tuple, List
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* [:footcite:`cosentino2020librimix`] dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
        subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): Sample rate of the audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any of the audio files has a
            different sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
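# Hedged usage sketch: reading one mixture/source pair from a manually generated
# Libri2Mix copy. The root path is a placeholder.
if __name__ == "__main__":
    dataset = LibriMix(
        "/data/librimix_root", subset="dev", num_speakers=2, sample_rate=8000, task="sep_clean"
    )
    sample_rate, mixture, sources = dataset[0]
    print(sample_rate, mixture.shape, [s.shape for s in sources])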
|
from pathlib import Path
from typing import Union, Tuple, List
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the LibriMix dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
        subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
            ``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): Sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any of the audio files has a
            different sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
|
import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={
'n_dim': 100,
'ef': 100,
'ef_construction': 100,
'max_connections': 16,
'dynamic_ef_min': 50,
'dynamic_ef_max': 300,
'dynamic_ef_factor': 4,
'vector_cache_max_objects': 1000000,
'flat_search_cutoff': 20000,
'cleanup_interval_seconds': 1000,
'skip': True,
},
)
result = requests.get('http://localhost:8080/v1/schema').json()
classes = result.get('classes', [])
main_class = list(
filter(lambda class_element: class_element['class'] == da._config.name, classes)
)
assert len(main_class) == 1
main_class = main_class[0]
assert main_class.get('vectorIndexConfig', {}).get('maxConnections') == 16
assert main_class.get('vectorIndexConfig', {}).get('efConstruction') == 100
assert main_class.get('vectorIndexConfig', {}).get('ef') == 100
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMin') == 50
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMax') == 300
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfFactor') == 4
assert (
main_class.get('vectorIndexConfig', {}).get('vectorCacheMaxObjects') == 1000000
)
assert main_class.get('vectorIndexConfig', {}).get('flatSearchCutoff') == 20000
assert main_class.get('vectorIndexConfig', {}).get('cleanupIntervalSeconds') == 1000
assert main_class.get('vectorIndexConfig', {}).get('skip') is True
|
import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={'n_dim': 100, 'ef': 100, 'ef_construction': 100, 'max_connections': 16},
)
result = requests.get('http://localhost:8080/v1/schema').json()
classes = result.get('classes', [])
main_class = list(
filter(lambda class_element: class_element['class'] == da._config.name, classes)
)
assert len(main_class) == 1
main_class = main_class[0]
assert main_class.get('vectorIndexConfig', {}).get('maxConnections') == 16
assert main_class.get('vectorIndexConfig', {}).get('efConstruction') == 100
assert main_class.get('vectorIndexConfig', {}).get('ef') == 100
|
from __future__ import annotations
import gzip
import os
from . import InputExample
class NLIDataReader:
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        ``filename`` specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
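# Usage sketch (illustrative only; "datasets/AllNLI" is a placeholder folder that must
# contain s1.train.gz, s2.train.gz and labels.train.gz as described in get_examples()):
#
#     reader = NLIDataReader("datasets/AllNLI")
#     train_examples = reader.get_examples("train.gz", max_examples=1000)
#     print(len(train_examples), train_examples[0].texts, train_examples[0].label)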
|
from __future__ import annotations
import gzip
import os
from . import InputExample
class NLIDataReader(object):
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        ``filename`` specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available # noqa
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.embedding.torch import TorchEmbedding
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.embedding.tensorflow import TensorFlowEmbedding
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="AnyEmbedding")
class AnyEmbedding(AnyTensor, EmbeddingMixin):
"""
Represents an embedding tensor object that can be used with TensorFlow, PyTorch, and NumPy type.
---
'''python
from docarray import BaseDoc
from docarray.typing import AnyEmbedding
class MyEmbeddingDoc(BaseDoc):
embedding: AnyEmbedding
# Example usage with TensorFlow:
import tensorflow as tf
    doc = MyEmbeddingDoc(embedding=tf.zeros((1000, 2)))
type(doc.embedding) # TensorFlowEmbedding
# Example usage with PyTorch:
import torch
doc = MyEmbeddingDoc(embedding=torch.zeros(1000, 2))
type(doc.embedding) # TorchEmbedding
# Example usage with NumPy:
import numpy as np
doc = MyEmbeddingDoc(embedding=np.zeros((1000, 2)))
type(doc.embedding) # NdArrayEmbedding
'''
---
Raises:
TypeError: If the type of the value is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray]
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(TorchEmbedding, value)
elif isinstance(value, torch.Tensor):
return TorchEmbedding._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(TensorFlowEmbedding, value)
elif isinstance(value, tf.Tensor):
return TensorFlowEmbedding._docarray_from_native(value) # noqa
try:
return NdArrayEmbedding.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import Union
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.embedding.torch import TorchEmbedding
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.embedding.tensorflow import (
TensorFlowEmbedding as TFEmbedding,
)
if tf_available and torch_available:
AnyEmbedding = Union[NdArrayEmbedding, TorchEmbedding, TFEmbedding] # type: ignore
elif tf_available:
AnyEmbedding = Union[NdArrayEmbedding, TFEmbedding] # type: ignore
elif torch_available:
AnyEmbedding = Union[NdArrayEmbedding, TorchEmbedding] # type: ignore
else:
AnyEmbedding = Union[NdArrayEmbedding] # type: ignore
__all__ = ['AnyEmbedding']
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(model, args.img, result[0], score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
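# Example invocation (paths are placeholders; any mmdet config/checkpoint pair works):
#   python image_demo.py demo/demo.jpg configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       checkpoints/faster_rcnn_r50_fpn_1x_coco.pth --device cpu --score-thr 0.5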
|
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(model, args.img, result[0], score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
"""
Computes embeddings
"""
from typing import Optional
import numpy as np
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_tiny_model: SentenceTransformer, normalize_embeddings: bool, prompt_name: Optional[str]
) -> None:
model = stsb_bert_tiny_model
model.prompts = {"retrieval": "Represent this sentence for searching relevant passages: "}
sentences = ["This is sentence {}".format(i) for i in range(40)]
# Start the multi-process pool on e.g. two CPU devices & compute the embeddings using the pool
pool = model.start_multi_process_pool(["cpu", "cpu"])
emb = model.encode_multi_process(
sentences, pool, chunk_size=10, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name
)
model.stop_multi_process_pool(pool)
assert emb.shape == (len(sentences), 128)
# Make sure the embeddings aren't just all 0
assert emb.sum() != 0.0
# Compare against normal embeddings
emb_normal = model.encode(sentences, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name)
diff = np.max(np.abs(emb - emb_normal))
assert diff < 0.001
# Ensure that after normalizing, the means are all almost 0, and otherwise not
assert np.all(np.abs(emb.mean(1)) < 0.01) == normalize_embeddings
|
"""
Computes embeddings
"""
import numpy as np
import pytest
from typing import Optional
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_tiny_model: SentenceTransformer, normalize_embeddings: bool, prompt_name: Optional[str]
) -> None:
model = stsb_bert_tiny_model
model.prompts = {"retrieval": "Represent this sentence for searching relevant passages: "}
sentences = ["This is sentence {}".format(i) for i in range(40)]
# Start the multi-process pool on e.g. two CPU devices & compute the embeddings using the pool
pool = model.start_multi_process_pool(["cpu", "cpu"])
emb = model.encode_multi_process(
sentences, pool, chunk_size=10, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name
)
model.stop_multi_process_pool(pool)
assert emb.shape == (len(sentences), 128)
# Make sure the embeddings aren't just all 0
assert emb.sum() != 0.0
# Compare against normal embeddings
emb_normal = model.encode(sentences, normalize_embeddings=normalize_embeddings, prompt_name=prompt_name)
diff = np.max(np.abs(emb - emb_normal))
assert diff < 0.001
# Ensure that after normalizing, the means are all almost 0, and otherwise not
assert np.all(np.abs(emb.mean(1)) < 0.01) == normalize_embeddings
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, inputs, data_samples, mode='tensor'):
labels = torch.stack(data_samples)
outputs = self.linear(inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
evaluator.__class__ = Evaluator
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, batch_inputs, labels, mode='tensor'):
labels = torch.stack(labels)
outputs = self.linear(batch_inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
evaluator.__class__ = Evaluator
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
|
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import json
class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default=[],
)
items_object: dict = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default={},
)
items_str: str = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default="",
)
class Output(BlockSchema):
item: Any = SchemaField(description="The current item in the iteration")
key: Any = SchemaField(
description="The key or index of the current item in the iteration",
)
def __init__(self):
super().__init__(
id="f66a3543-28d3-4ab5-8945-9b336371e2ce",
input_schema=StepThroughItemsBlock.Input,
output_schema=StepThroughItemsBlock.Output,
categories={BlockCategory.LOGIC},
description="Iterates over a list or dictionary and outputs each item.",
test_input={"items": [1, 2, 3, {"key1": "value1", "key2": "value2"}]},
test_output=[
("item", 1),
("key", 0),
("item", 2),
("key", 1),
("item", 3),
("key", 2),
("item", {"key1": "value1", "key2": "value2"}),
("key", 3),
],
test_mock={},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
for data in [input_data.items, input_data.items_object, input_data.items_str]:
if not data:
continue
if isinstance(data, str):
items = json.loads(data)
else:
items = data
if isinstance(items, dict):
                # If items is a dictionary, iterate over its key/value pairs
                for key, item in items.items():
                    yield "item", item
                    yield "key", key
else:
# If items is a list, iterate over the list
for index, item in enumerate(items):
yield "item", item
yield "key", index
|
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list | dict = SchemaField(
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
)
class Output(BlockSchema):
item: Any = SchemaField(description="The current item in the iteration")
key: Any = SchemaField(
description="The key or index of the current item in the iteration",
)
def __init__(self):
super().__init__(
id="f66a3543-28d3-4ab5-8945-9b336371e2ce",
input_schema=StepThroughItemsBlock.Input,
output_schema=StepThroughItemsBlock.Output,
categories={BlockCategory.LOGIC},
description="Iterates over a list or dictionary and outputs each item.",
test_input={"items": [1, 2, 3, {"key1": "value1", "key2": "value2"}]},
test_output=[
("item", 1),
("key", 0),
("item", 2),
("key", 1),
("item", 3),
("key", 2),
("item", {"key1": "value1", "key2": "value2"}),
("key", 3),
],
test_mock={},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
items = input_data.items
if isinstance(items, dict):
            # If items is a dictionary, iterate over its key/value pairs
            for key, item in items.items():
                yield "item", item
                yield "key", key
else:
# If items is a list, iterate over the list
for index, item in enumerate(items):
yield "item", item
yield "key", index
|
from torio.io import CodecConfig, StreamingMediaDecoder as StreamReader, StreamingMediaEncoder as StreamWriter
from ._effector import AudioEffector
from ._playback import play_audio
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
|
from ._effector import AudioEffector
from ._playback import play_audio
from ._stream_reader import StreamReader
from ._stream_writer import CodecConfig, StreamWriter
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
|
import asyncio
import os
from jina import __default_host__
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
from jina.serve.runtimes.gateway.http.gateway import HTTPGateway
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
        The async method to set up the runtime.
        Sets up the uvicorn server.
"""
self.gateway = HTTPGateway(
name=self.name,
port=self.args.port,
title=self.args.title,
description=self.args.description,
no_debug_endpoints=self.args.no_debug_endpoints,
no_crud_endpoints=self.args.no_crud_endpoints,
expose_endpoints=self.args.expose_endpoints,
expose_graphql_endpoint=self.args.expose_graphql_endpoint,
cors=self.args.cors,
ssl_keyfile=self.args.ssl_keyfile,
ssl_certfile=self.args.ssl_certfile,
uvicorn_kwargs=self.args.uvicorn_kwargs,
)
self.gateway.set_streamer(
args=self.args,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
runtime_name=self.args.name,
)
await self.gateway.setup_server()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self.gateway.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self.gateway.teardown()
async def async_cancel(self):
"""Stop the server."""
await self.gateway.stop_server()
async def async_run_forever(self):
"""Running method of the server."""
await self.gateway.run_server()
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
        The async method to set up the runtime.
        Sets up the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
args=self.args,
logger=self.logger,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of the server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
        help='Pad the input image to the minimum size that is divisible '
        'by size_divisor; -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
orig_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != orig_shape:
        print(f'{split_line}\nUse size divisor to set input shape '
              f'from {orig_shape} to {input_shape}\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
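# Example invocation (config path is a placeholder):
#   python get_flops.py configs/retinanet/retinanet_r50_fpn_1x_coco.py --shape 1280 800 --size-divisor 32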
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (3, ) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
|
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: list[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
msg = f"Loading {config_type} agent not supported"
raise ValueError(msg)
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[list[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If agent type is not specified in the config.
"""
if "_type" not in config:
msg = "Must specify an agent Type in config"
raise ValueError(msg)
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
msg = (
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
)
raise ValueError(msg)
if tools is None:
msg = (
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
raise ValueError(msg)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
msg = f"Loading {config_type} agent not supported"
raise ValueError(msg)
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
msg = "One of `llm_chain` and `llm_chain_path` should be specified."
raise ValueError(msg)
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
RuntimeError: If loading from the deprecated github-based
Hub is attempted.
"""
if isinstance(path, str) and path.startswith("lc://"):
msg = (
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
raise RuntimeError(msg)
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path) as f:
config = yaml.safe_load(f)
else:
msg = f"Unsupported file type, must be one of {valid_suffixes}."
raise ValueError(msg)
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
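# Usage sketch (illustrative; "agent.json" is a placeholder whose config must contain an
# "_type" key plus either "llm_chain" or "llm_chain_path"):
#
#     agent = load_agent("agent.json")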
|
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: list[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[list[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If agent type is not specified in the config.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
RuntimeError: If loading from the deprecated github-based
Hub is attempted.
"""
if isinstance(path, str) and path.startswith("lc://"):
raise RuntimeError(
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path) as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
|
"""
Official evaluation script for ReCoRD v1.0.
(Some functions are adopted from the SQuAD evaluation script.)
"""
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
correct_ids = []
for passage in dataset:
for qa in passage["qas"]:
total += 1
if qa["id"] not in predictions:
message = f'Unanswered question {qa["id"]} will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = [x["text"] for x in qa["answers"]]
prediction = predictions[qa["id"]]
_exact_match = metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
if int(_exact_match) == 1:
correct_ids.append(qa["id"])
exact_match += _exact_match
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = exact_match / total
f1 = f1 / total
return {"exact_match": exact_match, "f1": f1}, correct_ids
if __name__ == "__main__":
expected_version = "1.0"
parser = argparse.ArgumentParser("Official evaluation script for ReCoRD v1.0.")
parser.add_argument("data_file", help="The dataset file in JSON format.")
parser.add_argument("pred_file", help="The model prediction file in JSON format.")
parser.add_argument("--output_correct_ids", action="store_true", help="Output the correctly answered query IDs.")
args = parser.parse_args()
with open(args.data_file) as data_file:
dataset_json = json.load(data_file)
if dataset_json["version"] != expected_version:
print(
f'Evaluation expects v-{expected_version}, but got dataset with v-{dataset_json["version"]}',
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.pred_file) as pred_file:
predictions = json.load(pred_file)
metrics, correct_ids = evaluate(dataset, predictions)
if args.output_correct_ids:
print(f"Output {len(correct_ids)} correctly answered question IDs.")
with open("correct_ids.json", "w") as f:
json.dump(correct_ids, f)
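# Quick illustration of the scoring helpers above (values are what the functions return
# for these toy strings):
#   normalize_answer("The Cat!")            -> "cat"
#   exact_match_score("the cat", "Cat")     -> True
#   f1_score("the black cat", "black cat")  -> 1.0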
|
"""
Official evaluation script for ReCoRD v1.0.
(Some functions are adopted from the SQuAD evaluation script.)
"""
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
correct_ids = []
for passage in dataset:
for qa in passage["qas"]:
total += 1
if qa["id"] not in predictions:
message = f'Unanswered question {qa["id"]} will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x["text"], qa["answers"]))
prediction = predictions[qa["id"]]
_exact_match = metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
if int(_exact_match) == 1:
correct_ids.append(qa["id"])
exact_match += _exact_match
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = exact_match / total
f1 = f1 / total
return {"exact_match": exact_match, "f1": f1}, correct_ids
if __name__ == "__main__":
expected_version = "1.0"
parser = argparse.ArgumentParser("Official evaluation script for ReCoRD v1.0.")
parser.add_argument("data_file", help="The dataset file in JSON format.")
parser.add_argument("pred_file", help="The model prediction file in JSON format.")
parser.add_argument("--output_correct_ids", action="store_true", help="Output the correctly answered query IDs.")
args = parser.parse_args()
with open(args.data_file) as data_file:
dataset_json = json.load(data_file)
if dataset_json["version"] != expected_version:
print(
f'Evaluation expects v-{expected_version}, but got dataset with v-{dataset_json["version"]}',
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.pred_file) as pred_file:
predictions = json.load(pred_file)
metrics, correct_ids = evaluate(dataset, predictions)
if args.output_correct_ids:
print(f"Output {len(correct_ids)} correctly answered question IDs.")
with open("correct_ids.json", "w") as f:
json.dump(correct_ids, f)
|
from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
from ._stereo_matching import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
from .caltech import Caltech101, Caltech256
from .celeba import CelebA
from .cifar import CIFAR10, CIFAR100
from .cityscapes import Cityscapes
from .clevr import CLEVRClassification
from .coco import CocoCaptions, CocoDetection
from .country211 import Country211
from .dtd import DTD
from .eurosat import EuroSAT
from .fakedata import FakeData
from .fer2013 import FER2013
from .fgvc_aircraft import FGVCAircraft
from .flickr import Flickr30k, Flickr8k
from .flowers102 import Flowers102
from .folder import DatasetFolder, ImageFolder
from .food101 import Food101
from .gtsrb import GTSRB
from .hmdb51 import HMDB51
from .imagenet import ImageNet
from .imagenette import Imagenette
from .inaturalist import INaturalist
from .kinetics import Kinetics
from .kitti import Kitti
from .lfw import LFWPairs, LFWPeople
from .lsun import LSUN, LSUNClass
from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
from .moving_mnist import MovingMNIST
from .omniglot import Omniglot
from .oxford_iiit_pet import OxfordIIITPet
from .pcam import PCAM
from .phototour import PhotoTour
from .places365 import Places365
from .rendered_sst2 import RenderedSST2
from .sbd import SBDataset
from .sbu import SBU
from .semeion import SEMEION
from .stanford_cars import StanfordCars
from .stl10 import STL10
from .sun397 import SUN397
from .svhn import SVHN
from .ucf101 import UCF101
from .usps import USPS
from .vision import VisionDataset
from .voc import VOCDetection, VOCSegmentation
from .widerface import WIDERFace
__all__ = (
"LSUN",
"LSUNClass",
"ImageFolder",
"DatasetFolder",
"FakeData",
"CocoCaptions",
"CocoDetection",
"CIFAR10",
"CIFAR100",
"EMNIST",
"FashionMNIST",
"QMNIST",
"MNIST",
"KMNIST",
"StanfordCars",
"STL10",
"SUN397",
"SVHN",
"PhotoTour",
"SEMEION",
"Omniglot",
"SBU",
"Flickr8k",
"Flickr30k",
"Flowers102",
"VOCSegmentation",
"VOCDetection",
"Cityscapes",
"ImageNet",
"Caltech101",
"Caltech256",
"CelebA",
"WIDERFace",
"SBDataset",
"VisionDataset",
"USPS",
"Kinetics",
"HMDB51",
"UCF101",
"Places365",
"Kitti",
"INaturalist",
"LFWPeople",
"LFWPairs",
"KittiFlow",
"Sintel",
"FlyingChairs",
"FlyingThings3D",
"HD1K",
"Food101",
"DTD",
"FER2013",
"GTSRB",
"CLEVRClassification",
"OxfordIIITPet",
"PCAM",
"Country211",
"FGVCAircraft",
"EuroSAT",
"RenderedSST2",
"Kitti2012Stereo",
"Kitti2015Stereo",
"CarlaStereo",
"Middlebury2014Stereo",
"CREStereo",
"FallingThingsStereo",
"SceneFlowStereo",
"SintelStereo",
"InStereo2k",
"ETH3DStereo",
"wrap_dataset_for_transforms_v2",
"Imagenette",
)
# We override current module's attributes to handle the import:
# from torchvision.datasets import wrap_dataset_for_transforms_v2
# without a cyclic error.
# Ref: https://peps.python.org/pep-0562/
def __getattr__(name):
if name in ("wrap_dataset_for_transforms_v2",):
from torchvision.tv_tensors._dataset_wrapper import wrap_dataset_for_transforms_v2
return wrap_dataset_for_transforms_v2
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
from ._stereo_matching import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
from .caltech import Caltech101, Caltech256
from .celeba import CelebA
from .cifar import CIFAR10, CIFAR100
from .cityscapes import Cityscapes
from .clevr import CLEVRClassification
from .coco import CocoCaptions, CocoDetection
from .country211 import Country211
from .dtd import DTD
from .eurosat import EuroSAT
from .fakedata import FakeData
from .fer2013 import FER2013
from .fgvc_aircraft import FGVCAircraft
from .flickr import Flickr30k, Flickr8k
from .flowers102 import Flowers102
from .folder import DatasetFolder, ImageFolder
from .food101 import Food101
from .gtsrb import GTSRB
from .hmdb51 import HMDB51
from .imagenet import ImageNet
from .inaturalist import INaturalist
from .kinetics import Kinetics
from .kitti import Kitti
from .lfw import LFWPairs, LFWPeople
from .lsun import LSUN, LSUNClass
from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
from .moving_mnist import MovingMNIST
from .omniglot import Omniglot
from .oxford_iiit_pet import OxfordIIITPet
from .pcam import PCAM
from .phototour import PhotoTour
from .places365 import Places365
from .rendered_sst2 import RenderedSST2
from .sbd import SBDataset
from .sbu import SBU
from .semeion import SEMEION
from .stanford_cars import StanfordCars
from .stl10 import STL10
from .sun397 import SUN397
from .svhn import SVHN
from .ucf101 import UCF101
from .usps import USPS
from .vision import VisionDataset
from .voc import VOCDetection, VOCSegmentation
from .widerface import WIDERFace
__all__ = (
"LSUN",
"LSUNClass",
"ImageFolder",
"DatasetFolder",
"FakeData",
"CocoCaptions",
"CocoDetection",
"CIFAR10",
"CIFAR100",
"EMNIST",
"FashionMNIST",
"QMNIST",
"MNIST",
"KMNIST",
"StanfordCars",
"STL10",
"SUN397",
"SVHN",
"PhotoTour",
"SEMEION",
"Omniglot",
"SBU",
"Flickr8k",
"Flickr30k",
"Flowers102",
"VOCSegmentation",
"VOCDetection",
"Cityscapes",
"ImageNet",
"Caltech101",
"Caltech256",
"CelebA",
"WIDERFace",
"SBDataset",
"VisionDataset",
"USPS",
"Kinetics",
"HMDB51",
"UCF101",
"Places365",
"Kitti",
"INaturalist",
"LFWPeople",
"LFWPairs",
"KittiFlow",
"Sintel",
"FlyingChairs",
"FlyingThings3D",
"HD1K",
"Food101",
"DTD",
"FER2013",
"GTSRB",
"CLEVRClassification",
"OxfordIIITPet",
"PCAM",
"Country211",
"FGVCAircraft",
"EuroSAT",
"RenderedSST2",
"Kitti2012Stereo",
"Kitti2015Stereo",
"CarlaStereo",
"Middlebury2014Stereo",
"CREStereo",
"FallingThingsStereo",
"SceneFlowStereo",
"SintelStereo",
"InStereo2k",
"ETH3DStereo",
"wrap_dataset_for_transforms_v2",
)
# We override current module's attributes to handle the import:
# from torchvision.datasets import wrap_dataset_for_transforms_v2
# without a cyclic error.
# Ref: https://peps.python.org/pep-0562/
def __getattr__(name):
if name in ("wrap_dataset_for_transforms_v2",):
from torchvision.tv_tensors._dataset_wrapper import wrap_dataset_for_transforms_v2
return wrap_dataset_for_transforms_v2
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
"""Product extraction pack."""
import asyncio
from typing import Any, Dict
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program.multi_modal_llm_program import (
MultiModalLLMCompletionProgram,
)
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from pydantic import BaseModel, Field
async def _screenshot_page(
url: str, out_path: str, width: int = 1200, height: int = 800
) -> None:
from pyppeteer import launch
browser = await launch()
page = await browser.newPage()
    await page.setViewport({"width": width, "height": height})
await page.goto(url, {"waitUntil": "domcontentloaded"})
await page.screenshot({"path": out_path})
await browser.close()
class Product(BaseModel):
"""Data model for an Amazon Product."""
title: str = Field(..., description="Title of product")
category: str = Field(..., description="Category of product")
discount: float = Field(..., description="Discount of product")
price: float = Field(..., description="Price of product")
rating: float = Field(..., description="Rating of product")
description: str = Field(..., description="Description of product")
img_description: str = Field(..., description="Description of product image")
inventory: str = Field(..., description="Inventory of product")
DEFAULT_PROMPT_TEMPLATE_STR = """\
Can you extract the following fields from this product, in JSON format?
"""
class AmazonProductExtractionPack(BaseLlamaPack):
"""
Product extraction pack.
Given a website url of a product (e.g. Amazon page), screenshot it,
and use GPT-4V to extract structured outputs.
"""
def __init__(
self,
website_url: str,
tmp_file_path: str = "./tmp.png",
screenshot_width: int = 1200,
screenshot_height: int = 800,
prompt_template_str: str = DEFAULT_PROMPT_TEMPLATE_STR,
**kwargs: Any,
) -> None:
"""Init params."""
self.website_url = website_url
        # take a screenshot of the page and save it to a temporary file
asyncio.get_event_loop().run_until_complete(
_screenshot_page(
website_url,
tmp_file_path,
width=screenshot_width,
height=screenshot_height,
)
)
        # load the screenshot into image documents
self.image_documents = SimpleDirectoryReader(
input_files=[tmp_file_path]
).load_data()
# initialize openai pydantic program
self.openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", max_new_tokens=1000
)
self.openai_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Product),
image_documents=self.image_documents,
prompt_template_str=prompt_template_str,
llm=self.openai_mm_llm,
verbose=True,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"openai_program": self.openai_program,
"openai_mm_llm": self.openai_mm_llm,
"image_documents": self.image_documents,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.openai_program(*args, **kwargs)
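# Illustrative usage sketch (not from the source): requires pyppeteer, network
# access, and an OpenAI API key in the environment; the URL is a placeholder.
if __name__ == "__main__":
    pack = AmazonProductExtractionPack(
        website_url="https://www.amazon.com/dp/EXAMPLE",  # hypothetical product page
    )
    product = pack.run()  # returns a `Product` instance extracted by GPT-4V
    print(product.title, product.price)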
|
"""Product extraction pack."""
import asyncio
from typing import Any, Dict
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program.multi_modal_llm_program import (
MultiModalLLMCompletionProgram,
)
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from pydantic import BaseModel, Field
async def _screenshot_page(
url: str, out_path: str, width: int = 1200, height: int = 800
) -> None:
from pyppeteer import launch
browser = await launch()
page = await browser.newPage()
    await page.setViewport({"width": width, "height": height})
await page.goto(url, {"waitUntil": "domcontentloaded"})
await page.screenshot({"path": out_path})
await browser.close()
class Product(BaseModel):
"""Data model for an Amazon Product."""
title: str = Field(..., description="Title of product")
category: str = Field(..., description="Category of product")
discount: float = Field(..., description="Discount of product")
price: float = Field(..., description="Price of product")
rating: float = Field(..., description="Rating of product")
description: str = Field(..., description="Description of product")
img_description: str = Field(..., description="Description of product image")
inventory: str = Field(..., description="Inventory of product")
DEFAULT_PROMPT_TEMPLATE_STR = """\
Can you extract the following fields from this product, in JSON format?
"""
class AmazonProductExtractionPack(BaseLlamaPack):
"""Product extraction pack.
Given a website url of a product (e.g. Amazon page), screenshot it,
and use GPT-4V to extract structured outputs.
"""
def __init__(
self,
website_url: str,
tmp_file_path: str = "./tmp.png",
screenshot_width: int = 1200,
screenshot_height: int = 800,
prompt_template_str: str = DEFAULT_PROMPT_TEMPLATE_STR,
**kwargs: Any,
) -> None:
"""Init params."""
self.website_url = website_url
        # take a screenshot of the page and save it to a temporary file
asyncio.get_event_loop().run_until_complete(
_screenshot_page(
website_url,
tmp_file_path,
width=screenshot_width,
height=screenshot_height,
)
)
        # load the screenshot into image documents
self.image_documents = SimpleDirectoryReader(
input_files=[tmp_file_path]
).load_data()
# initialize openai pydantic program
self.openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", max_new_tokens=1000
)
self.openai_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Product),
image_documents=self.image_documents,
prompt_template_str=prompt_template_str,
llm=self.openai_mm_llm,
verbose=True,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"openai_program": self.openai_program,
"openai_mm_llm": self.openai_mm_llm,
"image_documents": self.image_documents,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.openai_program(*args, **kwargs)
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
        doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
T = TypeVar('T', bound='AudioNdArray')
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = parse_obj_as(AudioNdArray, doc_2.url.load())
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
"""
def to_audio_bytes(self):
tensor = (self * MAX_INT_16).astype('<h')
return tensor.tobytes()
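# Illustrative sketch (not part of the class above): '<h' is the NumPy dtype code
# for little-endian signed 16-bit integers, so `to_audio_bytes` rescales float
# samples in [-1.0, 1.0] to 16-bit PCM before serializing.
if __name__ == "__main__":
    import numpy as np

    samples = np.array([0.0, 0.5, -0.5])
    pcm = (samples * MAX_INT_16).astype('<h')  # -> [0, 16384, -16384] as int16
    assert pcm.tobytes() == b'\x00\x00\x00@\x00\xc0'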
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.7.0.dev0",
author="Nils Reimers",
author_email="[email protected]",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.32.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.6.0.dev0",
author="Nils Reimers",
author_email="[email protected]",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.32.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class TrafilaturaWebReader(BasePydanticReader):
"""
Trafilatura web page reader.
Reads pages from the web.
Requires the `trafilatura` package.
"""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "TrafilaturaWebReader"
def load_data(
self,
urls: List[str],
include_comments=True,
output_format="txt",
include_tables=True,
include_images=False,
include_formatting=False,
include_links=False,
show_progress=False,
no_ssl=False,
**kwargs,
) -> List[Document]:
"""
Load data from the urls.
Args:
urls (List[str]): List of URLs to scrape.
include_comments (bool, optional): Include comments in the output. Defaults to True.
output_format (str, optional): Output format. Defaults to 'txt'.
include_tables (bool, optional): Include tables in the output. Defaults to True.
include_images (bool, optional): Include images in the output. Defaults to False.
include_formatting (bool, optional): Include formatting in the output. Defaults to False.
include_links (bool, optional): Include links in the output. Defaults to False.
            show_progress (bool, optional): Show progress bar. Defaults to False.
no_ssl (bool, optional): Bypass SSL verification. Defaults to False.
kwargs: Additional keyword arguments for the `trafilatura.extract` function.
Returns:
List[Document]: List of documents.
"""
import trafilatura
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
if show_progress:
from tqdm import tqdm
iterator = tqdm(urls, desc="Downloading pages")
else:
iterator = urls
for url in iterator:
downloaded = trafilatura.fetch_url(url, no_ssl=no_ssl)
response = trafilatura.extract(
downloaded,
include_comments=include_comments,
output_format=output_format,
include_tables=include_tables,
include_images=include_images,
include_formatting=include_formatting,
include_links=include_links,
**kwargs,
)
documents.append(Document(text=response, id_=url))
return documents
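# Illustrative usage sketch (not from the source): requires the `trafilatura`
# package (and `tqdm` for the progress bar) plus network access; the URL is a
# placeholder.
if __name__ == "__main__":
    reader = TrafilaturaWebReader()
    docs = reader.load_data(["https://example.com"], show_progress=True)
    print(docs[0].text[:200])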
|
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class TrafilaturaWebReader(BasePydanticReader):
"""Trafilatura web page reader.
Reads pages from the web.
Requires the `trafilatura` package.
"""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "TrafilaturaWebReader"
def load_data(
self,
urls: List[str],
include_comments=True,
output_format="txt",
include_tables=True,
include_images=False,
include_formatting=False,
include_links=False,
show_progress=False,
no_ssl=False,
**kwargs,
) -> List[Document]:
"""Load data from the urls.
Args:
urls (List[str]): List of URLs to scrape.
include_comments (bool, optional): Include comments in the output. Defaults to True.
output_format (str, optional): Output format. Defaults to 'txt'.
include_tables (bool, optional): Include tables in the output. Defaults to True.
include_images (bool, optional): Include images in the output. Defaults to False.
include_formatting (bool, optional): Include formatting in the output. Defaults to False.
include_links (bool, optional): Include links in the output. Defaults to False.
            show_progress (bool, optional): Show progress bar. Defaults to False.
no_ssl (bool, optional): Bypass SSL verification. Defaults to False.
kwargs: Additional keyword arguments for the `trafilatura.extract` function.
Returns:
List[Document]: List of documents.
"""
import trafilatura
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
if show_progress:
from tqdm import tqdm
iterator = tqdm(urls, desc="Downloading pages")
else:
iterator = urls
for url in iterator:
downloaded = trafilatura.fetch_url(url, no_ssl=no_ssl)
response = trafilatura.extract(
downloaded,
include_comments=include_comments,
output_format=output_format,
include_tables=include_tables,
include_images=include_images,
include_formatting=include_formatting,
include_links=include_links,
**kwargs,
)
documents.append(Document(text=response, id_=url))
return documents
|
from workflows.resource import Resource, ResourceDefinition, ResourceManager # noqa
|
import inspect
from typing import (
Callable,
Generic,
TypeVar,
Union,
Awaitable,
Dict,
Any,
cast,
)
from pydantic import (
BaseModel,
ConfigDict,
)
T = TypeVar("T")
class _Resource(Generic[T]):
def __init__(
self, factory: Callable[..., Union[T, Awaitable[T]]], cache: bool
) -> None:
self._factory = factory
self._is_async = inspect.iscoroutinefunction(factory)
self.name = factory.__qualname__
self.cache = cache
async def call(self) -> T:
if self._is_async:
result = await cast(Callable[..., Awaitable[T]], self._factory)()
else:
result = cast(Callable[..., T], self._factory)()
return result
class ResourceDefinition(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str
resource: _Resource
def Resource(factory: Callable[..., T], cache: bool = True) -> _Resource[T]:
return _Resource(factory, cache)
class ResourceManager:
def __init__(self) -> None:
self.resources: Dict[str, Any] = {}
async def set(self, name: str, val: Any) -> None:
self.resources.update({name: val})
async def get(self, resource: _Resource) -> Any:
if not resource.cache:
val = await resource.call()
elif resource.cache and not self.resources.get(resource.name, None):
val = await resource.call()
await self.set(resource.name, val)
else:
val = self.resources.get(resource.name)
return val
def get_all(self) -> Dict[str, Any]:
return self.resources
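# Illustrative usage sketch (names below are hypothetical): a cached resource
# factory is invoked once by the manager, then served from its cache.
if __name__ == "__main__":
    import asyncio

    def make_client() -> dict:
        return {"connected": True}  # stands in for an expensive client object

    client_resource = Resource(make_client, cache=True)

    async def _demo() -> None:
        manager = ResourceManager()
        first = await manager.get(client_resource)
        second = await manager.get(client_resource)
        assert first is second  # second lookup hits the manager's cache

    asyncio.run(_demo())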
|
# mypy: allow-untyped-defs
from collections import OrderedDict
__all__ = ["raises", "expand_tuples", "reverse_dict", "groupby", "typename"]
def raises(err, lamda): # codespell:ignore lamda
try:
lamda() # codespell:ignore lamda
return False
except err:
return True
def expand_tuples(L):
"""
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
"""Topological sort algorithm by Kahn [1] - O(nodes + vertices)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> _toposort({1: (2, 3), 2: (3,)})
[1, 2, 3]
>>> # Closely follows the wikipedia page [2]
>>> # [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
>>> # Communications of the ACM
>>> # [2] http://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = OrderedDict((k, set(val)) for k, val in incoming_edges.items())
S = OrderedDict.fromkeys(v for v in edges if v not in incoming_edges)
L = []
while S:
n, _ = S.popitem()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S[m] = None
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
"""Reverses direction of dependence dict
>>> d = {"a": (1, 2), "b": (2, 3), "c": ()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order is not deterministic. Because we iterate over the
    input dict, the output of this function depends on the input dict
    order, so the order of the returned dict should be considered
    nondeterministic.
"""
result = OrderedDict() # type: ignore[var-annotated]
for key in d:
for val in d[key]:
result[val] = result.get(val, ()) + (key,)
return result
# Taken from toolz
# Avoids licensing issues because this version was authored by Matthew Rocklin
def groupby(func, seq):
"""Group a collection by a key function
>>> names = ["Alice", "Bob", "Charlie", "Dan", "Edith", "Frank"]
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
See Also:
``countby``
"""
d = OrderedDict() # type: ignore[var-annotated]
for item in seq:
key = func(item)
if key not in d:
d[key] = []
d[key].append(item)
return d
def typename(type):
"""Get the name of `type`.
Parameters
----------
type : Union[Type, Tuple[Type]]
Returns
-------
str
The name of `type` or a tuple of the names of the types in `type`.
Examples
--------
>>> typename(int)
'int'
>>> typename((int, float))
'(int, float)'
"""
try:
return type.__name__
except AttributeError:
if len(type) == 1:
return typename(*type)
return f"({', '.join(map(typename, type))})"
|
# mypy: allow-untyped-defs
from collections import OrderedDict
__all__ = ["raises", "expand_tuples", "reverse_dict", "groupby", "typename"]
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def expand_tuples(L):
"""
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
"""Topological sort algorithm by Kahn [1] - O(nodes + vertices)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> _toposort({1: (2, 3), 2: (3,)})
[1, 2, 3]
>>> # Closely follows the wikipedia page [2]
>>> # [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
>>> # Communications of the ACM
>>> # [2] http://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = OrderedDict((k, set(val)) for k, val in incoming_edges.items())
S = OrderedDict.fromkeys(v for v in edges if v not in incoming_edges)
L = []
while S:
n, _ = S.popitem()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S[m] = None
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
"""Reverses direction of dependence dict
>>> d = {"a": (1, 2), "b": (2, 3), "c": ()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order is not deterministic. Because we iterate over the
    input dict, the output of this function depends on the input dict
    order, so the order of the returned dict should be considered
    nondeterministic.
"""
result = OrderedDict() # type: ignore[var-annotated]
for key in d:
for val in d[key]:
result[val] = result.get(val, ()) + (key,)
return result
# Taken from toolz
# Avoids licensing issues because this version was authored by Matthew Rocklin
def groupby(func, seq):
"""Group a collection by a key function
>>> names = ["Alice", "Bob", "Charlie", "Dan", "Edith", "Frank"]
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
See Also:
``countby``
"""
d = OrderedDict() # type: ignore[var-annotated]
for item in seq:
key = func(item)
if key not in d:
d[key] = []
d[key].append(item)
return d
def typename(type):
"""Get the name of `type`.
Parameters
----------
type : Union[Type, Tuple[Type]]
Returns
-------
str
The name of `type` or a tuple of the names of the types in `type`.
Examples
--------
>>> typename(int)
'int'
>>> typename((int, float))
'(int, float)'
"""
try:
return type.__name__
except AttributeError:
if len(type) == 1:
return typename(*type)
return f"({', '.join(map(typename, type))})"
|
"""
This script trains sentence transformers with a triplet loss function.
As the corpus, we use the Wikipedia Sections dataset described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
from sentence_transformers.losses import TripletLoss
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 1
output_dir = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the Wikipedia-Sections dataset: https://huggingface.co/datasets/sentence-transformers/wikipedia-sections
train_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="train").select(
range(10_000)
)
eval_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="validation").select(
range(1000)
)
test_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="test").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss
# TripletLoss (https://sbert.net/docs/package_reference/losses.html#tripletloss) needs three text columns
train_loss = TripletLoss(model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = TripletEvaluator(
anchors=eval_dataset[:1000]["anchor"],
positives=eval_dataset[:1000]["positive"],
negatives=eval_dataset[:1000]["negative"],
name="wikipedia-sections-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="wikipedia-sections-triplet", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the Wikipedia Sections test dataset
test_evaluator = TripletEvaluator(
anchors=test_dataset["anchor"],
positives=test_dataset["positive"],
negatives=test_dataset["negative"],
name="wikipedia-sections-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-wikipedia-sections-triplet")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-wikipedia-sections-triplet')`."
)
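# Illustrative follow-up (not part of the original script): the saved model can be
# reloaded later for inference, e.g.:
# from sentence_transformers import SentenceTransformer
# model = SentenceTransformer(final_output_dir)  # or the pushed Hub repo id
# embeddings = model.encode(["first section text", "second section text"])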
|
"""
This script trains sentence transformers with a triplet loss function.
As the corpus, we use the Wikipedia Sections dataset described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import traceback
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
from sentence_transformers.losses import TripletLoss
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from datetime import datetime
from datasets import load_dataset
import logging
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 1
output_dir = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the Wikipedia-Sections dataset: https://huggingface.co/datasets/sentence-transformers/wikipedia-sections
train_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="train").select(
range(10_000)
)
eval_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="validation").select(
range(1000)
)
test_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="test").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss
# TripletLoss (https://sbert.net/docs/package_reference/losses.html#tripletloss) needs three text columns
train_loss = TripletLoss(model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = TripletEvaluator(
anchors=eval_dataset[:1000]["anchor"],
positives=eval_dataset[:1000]["positive"],
negatives=eval_dataset[:1000]["negative"],
name="wikipedia-sections-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="wikipedia-sections-triplet", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the Wikipedia Sections test dataset
test_evaluator = TripletEvaluator(
anchors=test_dataset["anchor"],
positives=test_dataset["positive"],
negatives=test_dataset["negative"],
name="wikipedia-sections-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-wikipedia-sections-triplet")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-wikipedia-sections-triplet')`."
)
|
from typing import Union, Iterable, MutableSequence, Iterator
from docarray.array.storage.memory.backend import needs_id2offset_rebuild
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
@needs_id2offset_rebuild
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
"""
self._data.insert(index, value)
def append(self, value: 'Document'):
"""Append `doc` to the end of the array.
        :param value: The doc to be appended.
"""
self._data.append(value)
if not self._needs_id2offset_rebuild:
self._id_to_index[value.id] = len(self) - 1
def __eq__(self, other):
return (
type(self) is type(other)
and type(self._data) is type(other._data)
and self._data == other._data
)
def __len__(self):
return len(self._data)
def __iter__(self) -> Iterator['Document']:
yield from self._data
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return x in self._id2offset
elif isinstance(x, Document):
return x.id in self._id2offset
else:
return False
def __repr__(self):
return f'<DocumentArray (length={len(self)}) at {id(self)}>'
def __add__(self, other: Union['Document', Iterable['Document']]):
v = type(self)(self)
v.extend(other)
return v
def extend(self, values: Iterable['Document']) -> None:
values = list(values) # consume the iterator only once
last_idx = len(self._id2offset)
self._data.extend(values)
self._id_to_index.update({d.id: i + last_idx for i, d in enumerate(values)})
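# Illustrative usage sketch: this mixin is exercised through the in-memory
# `DocumentArray` backend that composes it (DocumentArray itself is not defined
# in this module).
if __name__ == "__main__":
    from docarray import DocumentArray

    da = DocumentArray([Document() for _ in range(3)])
    da.append(Document())                  # keeps the id-to-offset index in sync
    da.extend([Document(), Document()])
    assert len(da) == 6 and da[0] in da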
|
from typing import Union, Iterable, MutableSequence, Iterator
from ..memory.backend import needs_id2offset_rebuild
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
@needs_id2offset_rebuild
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
"""
self._data.insert(index, value)
def append(self, value: 'Document'):
"""Append `doc` to the end of the array.
        :param value: The doc to be appended.
"""
self._data.append(value)
if not self._needs_id2offset_rebuild:
self._id_to_index[value.id] = len(self) - 1
def __eq__(self, other):
return (
type(self) is type(other)
and type(self._data) is type(other._data)
and self._data == other._data
)
def __len__(self):
return len(self._data)
def __iter__(self) -> Iterator['Document']:
yield from self._data
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return x in self._id2offset
elif isinstance(x, Document):
return x.id in self._id2offset
else:
return False
def __repr__(self):
return f'<DocumentArray (length={len(self)}) at {id(self)}>'
def __add__(self, other: Union['Document', Iterable['Document']]):
v = type(self)(self)
v.extend(other)
return v
def extend(self, values: Iterable['Document']) -> None:
values = list(values) # consume the iterator only once
last_idx = len(self._id2offset)
self._data.extend(values)
self._id_to_index.update({d.id: i + last_idx for i, d in enumerate(values)})
|
import copy
import warnings
from collections.abc import Mapping, Sequence
from typing import Any, TypeVar, Union
from torch.utils.data.datapipes.datapipe import MapDataPipe
_T = TypeVar("_T")
__all__ = ["SequenceWrapperMapDataPipe"]
class SequenceWrapperMapDataPipe(MapDataPipe[_T]):
r"""
Wraps a sequence object into a MapDataPipe.
Args:
        sequence: Sequence object to be wrapped into a MapDataPipe
deepcopy: Option to deepcopy input sequence object
.. note::
If ``deepcopy`` is set to False explicitly, users should ensure
        that the data pipeline doesn't contain any in-place operations over
the iterable instance, in order to prevent data inconsistency
across iterations.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp = SequenceWrapper(range(10))
>>> list(dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> dp = SequenceWrapper({'a': 100, 'b': 200, 'c': 300, 'd': 400})
>>> dp['a']
100
"""
sequence: Union[Sequence[_T], Mapping[Any, _T]]
def __init__(
self, sequence: Union[Sequence[_T], Mapping[Any, _T]], deepcopy: bool = True
) -> None:
if deepcopy:
try:
self.sequence = copy.deepcopy(sequence)
except TypeError:
warnings.warn(
"The input sequence can not be deepcopied, "
"please be aware of in-place modification would affect source data"
)
self.sequence = sequence
else:
self.sequence = sequence
def __getitem__(self, index: int) -> _T:
return self.sequence[index]
def __len__(self) -> int:
return len(self.sequence)
|
# mypy: allow-untyped-defs
import copy
import warnings
from torch.utils.data.datapipes.datapipe import MapDataPipe
__all__ = ["SequenceWrapperMapDataPipe"]
class SequenceWrapperMapDataPipe(MapDataPipe):
r"""
Wraps a sequence object into a MapDataPipe.
Args:
        sequence: Sequence object to be wrapped into a MapDataPipe
deepcopy: Option to deepcopy input sequence object
.. note::
If ``deepcopy`` is set to False explicitly, users should ensure
        that the data pipeline doesn't contain any in-place operations over
the iterable instance, in order to prevent data inconsistency
across iterations.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp = SequenceWrapper(range(10))
>>> list(dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> dp = SequenceWrapper({'a': 100, 'b': 200, 'c': 300, 'd': 400})
>>> dp['a']
100
"""
def __init__(self, sequence, deepcopy=True):
if deepcopy:
try:
self.sequence = copy.deepcopy(sequence)
except TypeError:
warnings.warn(
"The input sequence can not be deepcopied, "
"please be aware of in-place modification would affect source data"
)
self.sequence = sequence
else:
self.sequence = sequence
def __getitem__(self, index):
return self.sequence[index]
def __len__(self):
return len(self.sequence)
|
from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
"""Output parser for the structured chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
action_match = self.pattern.search(text)
if action_match is not None:
response = json.loads(action_match.group(1).strip(), strict=False)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
return AgentAction(
response["action"],
response.get("action_input", {}),
text,
)
return AgentFinish({"output": text}, text)
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@property
def _type(self) -> str:
return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
"""Output parser with retries for the structured chat agent."""
base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
"""The base parser to use."""
output_fixing_parser: Optional[OutputFixingParser] = None
"""The output fixing parser to use."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish] = (
self.output_fixing_parser.parse(text)
)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
base_parser: Optional[StructuredChatOutputParser] = None,
) -> StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm,
parser=base_parser,
)
return cls(output_fixing_parser=output_fixing_parser)
if base_parser is not None:
return cls(base_parser=base_parser)
return cls()
@property
def _type(self) -> str:
return "structured_chat_with_retries"
|
from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
"""Output parser for the structured chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
action_match = self.pattern.search(text)
if action_match is not None:
response = json.loads(action_match.group(1).strip(), strict=False)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
return AgentFinish({"output": text}, text)
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@property
def _type(self) -> str:
return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
"""Output parser with retries for the structured chat agent."""
base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
"""The base parser to use."""
output_fixing_parser: Optional[OutputFixingParser] = None
"""The output fixing parser to use."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish] = (
self.output_fixing_parser.parse(text)
)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
base_parser: Optional[StructuredChatOutputParser] = None,
) -> StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm, parser=base_parser
)
return cls(output_fixing_parser=output_fixing_parser)
if base_parser is not None:
return cls(base_parser=base_parser)
return cls()
@property
def _type(self) -> str:
return "structured_chat_with_retries"
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .model_test_impl import Tacotron2DecoderTests, Tacotron2EncoderTests, Tacotron2Tests
class TestTacotron2EncoderFloat32CPU(Tacotron2EncoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2DecoderFloat32CPU(Tacotron2DecoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2Float32CPU(Tacotron2Tests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .model_test_impl import (
Tacotron2DecoderTests,
Tacotron2EncoderTests,
Tacotron2Tests,
)
class TestTacotron2EncoderFloat32CPU(Tacotron2EncoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2DecoderFloat32CPU(Tacotron2DecoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2Float32CPU(Tacotron2Tests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# try to fix the fork error on macOS; it seems to have no effect, so OBJC_DISABLE_INITIALIZE_FORK_SAFETY must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(
            'multiprocessing start method is set to `fork`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `fork`: {e!r}'
)
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.16.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn with
    parallel plot generators, versus the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256.
    This is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# try to fix the fork error on macOS; it seems to have no effect, so OBJC_DISABLE_INITIALIZE_FORK_SAFETY must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.16.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn with
    parallel plot generators, versus the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256.
    This is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.keepdims = keepdims
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
self.built = True
def call(self, inputs):
raise NotImplementedError
def compute_output_shape(self, input_shape):
num_spatial_dims = len(input_shape) - 2
if self.data_format == "channels_last":
if self.keepdims:
return (
(input_shape[0],)
+ (1,) * num_spatial_dims
+ (input_shape[-1],)
)
else:
return (input_shape[0],) + (input_shape[-1],)
else:
if self.keepdims:
return (input_shape[0], input_shape[1]) + (
1,
) * num_spatial_dims
else:
return (input_shape[0], input_shape[1])
def get_config(self):
config = super().get_config()
config.update(
{
"data_format": self.data_format,
"keepdims": self.keepdims,
}
)
return config
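# Illustrative sketch (not part of Keras): how `compute_output_shape` collapses
# the spatial dimensions for a hypothetical 2D global pooling layer.
if __name__ == "__main__":
    pool = BaseGlobalPooling(pool_dimensions=2, data_format="channels_last")
    assert pool.compute_output_shape((8, 32, 32, 16)) == (8, 16)
    pool_keepdims = BaseGlobalPooling(
        pool_dimensions=2, data_format="channels_last", keepdims=True
    )
    assert pool_keepdims.compute_output_shape((8, 32, 32, 16)) == (8, 1, 1, 16)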
|
from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.keepdims = keepdims
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
def call(self, inputs):
raise NotImplementedError
def compute_output_shape(self, input_shape):
num_spatial_dims = len(input_shape) - 2
if self.data_format == "channels_last":
if self.keepdims:
return (
(input_shape[0],)
+ (1,) * num_spatial_dims
+ (input_shape[-1],)
)
else:
return (input_shape[0],) + (input_shape[-1],)
else:
if self.keepdims:
return (input_shape[0], input_shape[1]) + (
1,
) * num_spatial_dims
else:
return (input_shape[0], input_shape[1])
def get_config(self):
config = super().get_config()
config.update(
{
"data_format": self.data_format,
"keepdims": self.keepdims,
}
)
return config
|
import logging
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_box_folder_files_details,
get_text_representation,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
from llama_index.readers.box import BoxReaderBase
from box_sdk_gen import (
BoxClient,
File,
)
logger = logging.getLogger(__name__)
class BoxReaderTextExtraction(BoxReaderBase):
"""
A reader class for loading text content from Box files.
This class inherits from the `BaseReader` class and specializes in
extracting plain text content from Box files. It utilizes the provided
BoxClient object to interact with the Box API and retrieves the text
representation of the files.
Attributes:
_box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
"""
@classmethod
def class_name(cls) -> str:
return "BoxReaderTextExtraction"
def __init__(self, box_client: BoxClient):
super().__init__(box_client=box_client)
# def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
def load_data(
self,
file_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
is_recursive: bool = False,
) -> List[Document]:
"""
Extracts text content from Box files and creates Document objects.
This method utilizes the Box API to retrieve the text representation
(if available) of the specified Box files. It then creates Document
objects containing the extracted text and file metadata.
Args:
file_ids (Optional[List[str]], optional): A list of Box file IDs
to extract text from. If provided, folder_id is ignored.
Defaults to None.
folder_id (Optional[str], optional): The ID of the Box folder to
extract text from. If provided, along with is_recursive set to
True, retrieves data from sub-folders as well. Defaults to None.
is_recursive (bool, optional): If True and folder_id is provided,
extracts text from sub-folders within the specified folder.
Defaults to False.
Returns:
List[Document]: A list of Document objects containing the extracted
text content and file metadata.
"""
# Connect to Box
box_check_connection(self._box_client)
docs: List[Document] = []
box_files: List[File] = []
# get Box files details
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
box_files = get_text_representation(
box_client=self._box_client,
box_files=box_files,
)
for file in box_files:
doc = box_file_to_llama_document(file)
doc.text = file.text_representation if file.text_representation else ""
docs.append(doc)
return docs
|
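A hedged usage sketch for the reader above; the developer token and file IDs are placeholders, and `BoxDeveloperTokenAuth` is assumed to be available from `box_sdk_gen`:
from box_sdk_gen import BoxClient, BoxDeveloperTokenAuth
from llama_index.readers.box import BoxReaderTextExtraction

# Placeholder credentials -- replace with a real developer token.
auth = BoxDeveloperTokenAuth(token="YOUR_DEVELOPER_TOKEN")
client = BoxClient(auth=auth)

reader = BoxReaderTextExtraction(box_client=client)
# Load specific files by ID ...
docs = reader.load_data(file_ids=["1234567890"])
# ... or crawl a folder tree instead:
# docs = reader.load_data(folder_id="0", is_recursive=True)
for doc in docs:
    print(doc.metadata.get("name"), len(doc.text))
|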
import logging
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_box_folder_files_details,
get_text_representation,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
from llama_index.readers.box import BoxReaderBase
from box_sdk_gen import (
BoxClient,
File,
)
logger = logging.getLogger(__name__)
class BoxReaderTextExtraction(BoxReaderBase):
"""
A reader class for loading text content from Box files.
This class inherits from the `BaseReader` class and specializes in
extracting plain text content from Box files. It utilizes the provided
BoxClient object to interact with the Box API and retrieves the text
representation of the files.
Attributes:
_box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
"""
@classmethod
def class_name(cls) -> str:
return "BoxReaderTextExtraction"
def __init__(self, box_client: BoxClient):
super().__init__(box_client=box_client)
# def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
def load_data(
self,
file_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
is_recursive: bool = False,
) -> List[Document]:
"""
Extracts text content from Box files and creates Document objects.
This method utilizes the Box API to retrieve the text representation
(if available) of the specified Box files. It then creates Document
objects containing the extracted text and file metadata.
Args:
file_ids (Optional[List[str]], optional): A list of Box file IDs
to extract text from. If provided, folder_id is ignored.
Defaults to None.
folder_id (Optional[str], optional): The ID of the Box folder to
extract text from. If provided, along with is_recursive set to
True, retrieves data from sub-folders as well. Defaults to None.
is_recursive (bool, optional): If True and folder_id is provided,
extracts text from sub-folders within the specified folder.
Defaults to False.
Returns:
List[Document]: A list of Document objects containing the extracted
text content and file metadata.
"""
# Connect to Box
box_check_connection(self._box_client)
docs: List[Document] = []
box_files: List[File] = []
# get Box files details
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
box_files = get_text_representation(
box_client=self._box_client,
box_files=box_files,
)
for file in box_files:
doc = box_file_to_llama_document(file)
doc.text = file.text_representation if file.text_representation else ""
docs.append(doc)
return docs
|
import os
from llama_index.core.tools.function_tool import FunctionTool
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
def test_embedding_class() -> None:
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini() -> None:
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": ["Some content"],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": ["Some content", {"data": b"foo", "mime_type": "image/png"}],
}
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt() -> None:
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg"
)
)
response = Gemini(model="models/gemini-1.5-flash").chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream() -> None:
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini(model="models/gemini-1.5-flash").stream_chat(messages=[msg]))
assert response
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_with_tools() -> None:
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer."""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
msg = ChatMessage("What is the result of adding 2 and 3?")
model = Gemini(model="models/gemini-1.5-flash")
response = model.chat_with_tools(
user_msg=msg,
tools=[add_tool],
tool_config=ToolConfig(
function_calling_config=FunctionCallingConfig(
mode=FunctionCallingConfig.Mode.ANY
)
),
)
tool_calls = model.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "add"
assert tool_calls[0].tool_kwargs == {"a": 2, "b": 3}
assert len(response.additional_kwargs["tool_calls"]) >= 1
|
import os
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in Gemini.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_chat_message_to_gemini():
msg = ChatMessage("Some content")
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": ["Some content"],
}
msg = ChatMessage("Some content")
msg.blocks.append(ImageBlock(image=b"foo", image_mimetype="image/png"))
assert chat_message_to_gemini(msg) == {
"role": MessageRole.USER,
"parts": ["Some content", {"data": b"foo", "mime_type": "image/png"}],
}
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_generate_image_prompt():
msg = ChatMessage("Tell me the brand of the car in this image:")
msg.blocks.append(
ImageBlock(
url="https://upload.wikimedia.org/wikipedia/commons/5/52/Ferrari_SP_FFX.jpg"
)
)
response = Gemini().chat(messages=[msg])
assert "ferrari" in str(response).lower()
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_chat_stream():
msg = ChatMessage("List three types of software testing strategies")
response = list(Gemini().stream_chat(messages=[msg]))
assert response
|
try:
from docarray import BaseDoc as Document
from docarray import DocList as DocumentArray
docarray_v2 = True
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
|
try:
from docarray import BaseDoc as Document
from docarray import DocArray as DocumentArray
docarray_v2 = True
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
|
from collections.abc import AsyncIterator
import pytest
from langchain_core.utils.aiter import abatch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300]]),
(4, [], []),
],
)
async def test_abatch_iterate(
input_size: int, input_iterable: list[str], expected_output: list[list[str]]
) -> None:
"""Test batching function."""
async def _to_async_iterable(iterable: list[str]) -> AsyncIterator[str]:
for item in iterable:
yield item
iterator_ = abatch_iterate(input_size, _to_async_iterable(input_iterable))
assert isinstance(iterator_, AsyncIterator)
output = [el async for el in iterator_]
assert output == expected_output
|
from collections.abc import AsyncIterator
import pytest
from langchain_core.utils.aiter import abatch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300]]),
(4, [], []),
],
)
async def test_abatch_iterate(
input_size: int, input_iterable: list[str], expected_output: list[str]
) -> None:
"""Test batching function."""
async def _to_async_iterable(iterable: list[str]) -> AsyncIterator[str]:
for item in iterable:
yield item
iterator_ = abatch_iterate(input_size, _to_async_iterable(input_iterable))
assert isinstance(iterator_, AsyncIterator)
output = [el async for el in iterator_]
assert output == expected_output
|
from llama_index_instrumentation.event_handlers.base import BaseEventHandler # noqa
|
from typing import Any
from abc import abstractmethod
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from llama_index.core.instrumentation.events.base import BaseEvent
class BaseEventHandler(BaseModel):
"""Base callback handler that can be used to track event starts and ends."""
model_config = ConfigDict(arbitrary_types_allowed=True)
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "BaseEventHandler"
@abstractmethod
def handle(self, event: BaseEvent, **kwargs: Any) -> Any:
"""Logic for handling event."""
|
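A minimal sketch of a concrete handler built on the base class above; `InMemoryEventHandler` is a hypothetical example, and the registration shown in the trailing comment assumes the `get_dispatcher` helper from `llama_index.core.instrumentation`:
from typing import Any, List

from llama_index.core.instrumentation.events.base import BaseEvent


class InMemoryEventHandler(BaseEventHandler):
    """Hedged example: collects every event it sees, e.g. for tests."""

    events: List[BaseEvent] = []

    @classmethod
    def class_name(cls) -> str:
        return "InMemoryEventHandler"

    def handle(self, event: BaseEvent, **kwargs: Any) -> Any:
        self.events.append(event)
        return event


# Typical registration (assumed API):
# import llama_index.core.instrumentation as instrument
# instrument.get_dispatcher().add_event_handler(InMemoryEventHandler())
|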
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
print(f"{torch.ops.image._jpeg_version() = }")
assert torch.ops.image._is_compiled_against_turbo()
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina._docarray import Document
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from jina._docarray import Document
from jina._docarray.document import DocumentSourceType
from jina._docarray.document.mixins.content import DocumentContentType
from jina.types.request import Request
from docarray import DocList, BaseDoc
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
BaseDoc,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType], DocList
]
def request_generator(
exec_endpoint: str,
data: Optional['GeneratorSourceType'] = None,
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
:param data_type: if ``data`` is an iterator over self-contained document, i.e. :class:`DocumentSourceType`;
or an iterator over possible Document content (set to text, blob and buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable) or isinstance(data, Document):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
# must be handled here, as the gRPC channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina._docarray import Document
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from jina._docarray import Document
from jina._docarray.document import DocumentSourceType
from jina._docarray.document.mixins.content import DocumentContentType
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: Optional['GeneratorSourceType'] = None,
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
:param data_type: if ``data`` is an iterator over self-contained document, i.e. :class:`DocumentSourceType`;
or an iterator over possible Document content (set to text, blob and buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable) or isinstance(data, Document):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
# must be handled here, as the gRPC channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
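A hedged usage sketch for `request_generator`; it assumes a docarray<2 style `Document` with a `text` field, and the endpoint name and texts are placeholders:
from jina._docarray import Document
from jina.clients.request import request_generator
from jina.enums import DataInputType

# Placeholder documents; with docarray v2 you would pass a DocList of BaseDoc instead.
docs = (Document(text=t) for t in ('hello', 'world', '!'))

for req in request_generator(
    exec_endpoint='/index',
    data=docs,
    request_size=2,
    data_type=DataInputType.DOCUMENT,
):
    print(req.header.exec_endpoint, len(req.docs))
|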
from typing import Any
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
def test_parse() -> None:
"""Test parsing structured output."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
# Test valid JSON input
text = '```json\n{"name": "John", "age": 30}\n```'
expected_result = {"name": "John", "age": 30}
result = parser.parse(text)
assert result == expected_result, f"Expected {expected_result}, but got {result}"
# Test invalid JSON input
text = '```json\n{"name": "John"}\n```'
try:
parser.parse(text)
except OutputParserException:
pass # Test passes if OutputParserException is raised
else:
assert False, f"Expected OutputParserException, but got {parser.parse(text)}"
def test_output_type() -> None:
"""Test the output type of the structured output parser is Dict[str, Any]."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
assert parser.OutputType == dict[str, Any]
|
from typing import Any, Dict
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
def test_parse() -> None:
"""Test parsing structured output."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
# Test valid JSON input
text = '```json\n{"name": "John", "age": 30}\n```'
expected_result = {"name": "John", "age": 30}
result = parser.parse(text)
assert result == expected_result, f"Expected {expected_result}, but got {result}"
# Test invalid JSON input
text = '```json\n{"name": "John"}\n```'
try:
parser.parse(text)
except OutputParserException:
pass # Test passes if OutputParserException is raised
else:
assert False, f"Expected OutputParserException, but got {parser.parse(text)}"
def test_output_type() -> None:
"""Test the output type of the structured output parser is Dict[str, Any]."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
ResponseSchema(name="age", description="desc"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
assert parser.OutputType == Dict[str, Any]
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
model.eval()
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = SpladeLoss(
model=model,
main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus,
) # Weight for document loss
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=1400,
save_strategy="steps",
save_steps=1400,
learning_rate=4e-5,
optim="adamw_torch",
run_name=run_name,
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(run_name, private=True)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
model.eval()
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = SpladeLoss(
model=model,
main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus,
) # Weight for document loss
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=2400,
save_strategy="steps",
save_steps=2400,
learning_rate=4e-5,
optim="adamw_torch",
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(run_name, private=True)
if __name__ == "__main__":
main()
|
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.tv_tensors._tv_tensor import TVTensor
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(TVTensor):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L:
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist()) # type: ignore[index]
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
if categories is not None and len(categories) != one_hot_label.shape[-1]:
raise ValueError()
return one_hot_label
|
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.tv_tensors._tv_tensor import TVTensor
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(TVTensor):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L:
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist())
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
if categories is not None and len(categories) != one_hot_label.shape[-1]:
raise ValueError()
return one_hot_label
|
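A hedged usage sketch for the label classes defined above; the category list is illustrative:
import torch

categories = ["cat", "dog", "bird"]

# Build a label from a category name, then map indices back to names.
label = Label.from_category("dog", categories=categories)
print(int(label))             # 1
print(label.to_categories())  # 'dog'

batch = Label([0, 2, 1], categories=categories)
print(batch.to_categories())  # ['cat', 'bird', 'dog']

# One-hot labels validate that the last dim matches the number of categories.
one_hot = OneHotLabel(torch.eye(3)[1], categories=categories)
print(one_hot.shape[-1] == len(categories))  # True
|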
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self.assertTrue(gradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
self.assertTrue(gradgradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.preemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.preemphasis, (waveform, coeff)))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.deemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.deemphasis, (waveform, coeff)))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self.assertTrue(gradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
self.assertTrue(gradgradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.preemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.preemphasis, (waveform, coeff)))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.deemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.deemphasis, (waveform, coeff)))
|
import copy
import warnings
from dataclasses import InitVar, dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
ignore_url_params: bool = False
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
def __post_init__(self, use_auth_token):
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
self.token = use_auth_token
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
import copy
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token = "deprecated"
ignore_url_params: bool = False
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
def __post_init__(self):
if self.use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={self.use_auth_token}' instead.",
FutureWarning,
)
self.token = self.use_auth_token
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
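A hedged usage sketch for `DownloadConfig`; the dataset name is only an example:
from datasets import load_dataset
from datasets.download import DownloadConfig

download_config = DownloadConfig(
    max_retries=3,    # retry flaky HTTP requests a few times
    num_proc=4,       # download files in parallel
    use_etag=False,   # skip ETag validation of cached files
)
ds = load_dataset("imdb", download_config=download_config)
print(ds)
|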
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
msg = (
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
raise ImportError(msg)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
condition = results["flagged"] if self.openai_pre_1_0 else results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
|
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
msg = (
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
raise ImportError(msg)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
condition = results["flagged"] if self.openai_pre_1_0 else results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
|
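A hedged usage sketch for the moderation chain above; it assumes the `openai` package is installed and `OPENAI_API_KEY` is set in the environment:
from langchain.chains import OpenAIModerationChain

# error=False returns a warning string instead of raising on flagged content.
moderation = OpenAIModerationChain(error=False)
result = moderation.invoke({"input": "Some text to check."})
print(result["output"])
|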
import requests
from typing import List, Dict
DEFAULT_GITBOOK_API_URL = "https://api.gitbook.com/v1"
class GitbookClient:
"""
Gitbook Restful API Client.
Helper Class to invoke gitbook restful api & parse result
Args:
api_token (str): Gitbook API Token.
api_url (str): Gitbook API Endpoint.
"""
def __init__(self, api_token: str, api_url: str = DEFAULT_GITBOOK_API_URL):
self.api_token = api_token
self.base_url = api_url or DEFAULT_GITBOOK_API_URL
self.headers = {
"Authorization": f"Bearer {self.api_token}",
"Content-Type": "application/json",
}
def _make_request(self, url: str) -> Dict:
"""Helper method to handle common HTTP GET requests."""
try:
response = requests.get(url, headers=self.headers)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
return self._handle_error(e)
def get_space(self, space_id) -> Dict:
"""Gets information for a specific space."""
url = f"{self.base_url}/spaces/{space_id}"
return self._make_request(url)
def list_pages(self, space_id) -> List[Dict]:
"""Gets all pages in a specific space."""
space_info = self.get_space(space_id)
url = f"{self.base_url}/spaces/{space_id}/content"
space = self._make_request(url)
pages_info = []
for page in space.get("pages"):
GitbookClient._extract_page_info(
pages_info, page, space_info.get("title", "ROOT")
)
return pages_info
def get_page(self, space_id, page_id) -> Dict:
"""Gets the details of a specific page."""
url = (
f"{self.base_url}/spaces/{space_id}/content/page/{page_id}?format=markdown"
)
return self._make_request(url)
def get_page_markdown(self, space_id, page_id) -> str:
"""Gets the content of a specific page in Markdown format."""
page_content = self.get_page(space_id, page_id)
return page_content.get("markdown")
def _handle_error(self, response):
"""Handles HTTP errors."""
if isinstance(response, requests.exceptions.HTTPError):
error_message = f"Error: {response.response.status_code} Client Error: {response.response.reason}"
else:
error_message = f"Error: {response}"
raise Exception(error_message)
@staticmethod
def _extract_page_info(
pages: list, page: dict, prev_title: str = "", parent: str = ""
):
pageType = page.get("type", "")
title = prev_title + " > " + page.get("title")
id = page.get("id")
if pageType == "document":
pages.append(
{
"id": id,
"title": title,
"path": page.get("path"),
"description": page.get("description", ""),
"parent": parent,
}
)
for _page in page.get("pages"):
GitbookClient._extract_page_info(pages, _page, title, id)
elif pageType == "group":
for _page in page.get("pages"):
GitbookClient._extract_page_info(pages, _page, title, id)
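# Usage sketch (illustrative only, not part of the client): list the pages of a space and
# fetch their Markdown content; "YOUR_GITBOOK_TOKEN" and "space_123" are placeholder values.
if __name__ == "__main__":
    client = GitbookClient(api_token="YOUR_GITBOOK_TOKEN")
    for page_info in client.list_pages("space_123"):
        # Fetch the Markdown body for every document page found in the space.
        markdown = client.get_page_markdown("space_123", page_info["id"])
        print(page_info["title"], len(markdown or ""))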
|
import requests
from typing import List, Dict
DEFAULT_GITBOOK_API_URL = "https://api.gitbook.com/v1"
class GitbookClient:
"""Gitbook Restful API Client.
Helper Class to invoke gitbook restful api & parse result
Args:
api_token (str): Gitbook API Token.
api_url (str): Gitbook API Endpoint.
"""
def __init__(self, api_token: str, api_url: str = DEFAULT_GITBOOK_API_URL):
self.api_token = api_token
self.base_url = api_url or DEFAULT_GITBOOK_API_URL
self.headers = {
"Authorization": f"Bearer {self.api_token}",
"Content-Type": "application/json",
}
def _make_request(self, url: str) -> Dict:
"""Helper method to handle common HTTP GET requests."""
try:
response = requests.get(url, headers=self.headers)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
return self._handle_error(e)
def get_space(self, space_id) -> Dict:
"""Gets information for a specific space."""
url = f"{self.base_url}/spaces/{space_id}"
return self._make_request(url)
def list_pages(self, space_id) -> List[Dict]:
"""Gets all pages in a specific space."""
space_info = self.get_space(space_id)
url = f"{self.base_url}/spaces/{space_id}/content"
space = self._make_request(url)
pages_info = []
        for page in space.get("pages", []):
GitbookClient._extract_page_info(
pages_info, page, space_info.get("title", "ROOT")
)
return pages_info
def get_page(self, space_id, page_id) -> Dict:
"""Gets the details of a specific page."""
url = (
f"{self.base_url}/spaces/{space_id}/content/page/{page_id}?format=markdown"
)
return self._make_request(url)
def get_page_markdown(self, space_id, page_id) -> str:
"""Gets the content of a specific page in Markdown format."""
page_content = self.get_page(space_id, page_id)
return page_content.get("markdown")
def _handle_error(self, response):
"""Handles HTTP errors."""
if isinstance(response, requests.exceptions.HTTPError):
error_message = f"Error: {response.response.status_code} Client Error: {response.response.reason}"
else:
error_message = f"Error: {response}"
raise Exception(error_message)
@staticmethod
def _extract_page_info(
pages: list, page: dict, prev_title: str = "", parent: str = ""
):
pageType = page.get("type", "")
title = prev_title + " > " + page.get("title")
id = page.get("id")
if pageType == "document":
pages.append(
{
"id": id,
"title": title,
"path": page.get("path"),
"description": page.get("description", ""),
"parent": parent,
}
)
            for _page in page.get("pages", []):
GitbookClient._extract_page_info(pages, _page, title, id)
elif pageType == "group":
            for _page in page.get("pages", []):
GitbookClient._extract_page_info(pages, _page, title, id)
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
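        """Prefix all metric keys with ``name``, cast values to floats where possible, and keep ``primary_metric`` in sync with the prefixed keys."""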
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError when "Evaluator" is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
def embed_inputs(
self,
model: SentenceTransformer,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]:
"""
        Call the encode method of the model to embed the given inputs.
Args:
model (SentenceTransformer): Model we are evaluating
sentences (str | list[str] | np.ndarray): Text that we are embedding
Returns:
list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: The associated embedding
"""
return model.encode(sentences, **kwargs)
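# Illustrative sketch (not part of the library): a minimal custom evaluator built on this
# base class; the "DummyAccuracyEvaluator" name and its constant score are made up purely
# to show how ``primary_metric`` and ``prefix_name_to_metrics`` are meant to be used.
class DummyAccuracyEvaluator(SentenceEvaluator):
    def __init__(self, name: str = "") -> None:
        super().__init__()
        self.name = name
        self.primary_metric = "accuracy"  # key used for checkpoint selection and logging
    def __call__(self, model, output_path=None, epoch=-1, steps=-1):
        metrics = {"accuracy": 1.0}  # placeholder score instead of a real evaluation
        metrics = self.prefix_name_to_metrics(metrics, self.name)
        self.store_metrics_in_model_card_data(model, metrics, epoch, steps)
        return metrics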
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError when "Evaluator" is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
def embed_inputs(
self,
model: SentenceTransformer,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]:
"""
        Call the encode method of the model to embed the given inputs.
Args:
model (SentenceTransformer): Model we are evaluating
sentences (str | list[str] | np.ndarray): Text that we are embedding
Returns:
list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: The associated embedding
"""
return model.encode(sentences, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
which equals to ``1 / output_strides``. The output_strides is
for ``seg_preds``. Defaults to 1 / 4.
init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
config.
loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
head.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import ConfigType, OptMultiConfig, SampleList
from mmdet.registry import MODELS
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
which equals to ``1 / output_strides``. The output_strides is
for ``seg_preds``. Defaults to 1 / 4.
init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
config.
loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
head.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class HubSpotContactBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
contact_data: dict = SchemaField(
description="Contact data for create/update operations",
default_factory=dict,
)
email: str = SchemaField(
description="Email address for get/update operations", default=""
)
class Output(BlockSchema):
contact: dict = SchemaField(description="Contact information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="5267326e-c4c1-4016-9f54-4e72ad02f813",
description="Manages HubSpot contacts - create, update, and retrieve contact information",
categories={BlockCategory.CRM},
input_schema=HubSpotContactBlock.Input,
output_schema=HubSpotContactBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = Requests().post(
base_url, headers=headers, json={"properties": input_data.contact_data}
)
result = response.json()
yield "contact", result
yield "status", "created"
elif input_data.operation == "get":
# Search for contact by email
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "email",
"operator": "EQ",
"value": input_data.email,
}
]
}
]
}
response = Requests().post(search_url, headers=headers, json=search_data)
result = response.json()
yield "contact", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
search_response = Requests().post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "email",
"operator": "EQ",
"value": input_data.email,
}
]
}
]
},
)
contact_id = search_response.json().get("results", [{}])[0].get("id")
if contact_id:
response = Requests().patch(
f"{base_url}/{contact_id}",
headers=headers,
json={"properties": input_data.contact_data},
)
result = response.json()
yield "contact", result
yield "status", "updated"
else:
yield "contact", {}
yield "status", "contact_not_found"
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotContactBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
contact_data: dict = SchemaField(
description="Contact data for create/update operations",
default_factory=dict,
)
email: str = SchemaField(
description="Email address for get/update operations", default=""
)
class Output(BlockSchema):
contact: dict = SchemaField(description="Contact information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="5267326e-c4c1-4016-9f54-4e72ad02f813",
description="Manages HubSpot contacts - create, update, and retrieve contact information",
categories={BlockCategory.CRM},
input_schema=HubSpotContactBlock.Input,
output_schema=HubSpotContactBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = requests.post(
base_url, headers=headers, json={"properties": input_data.contact_data}
)
result = response.json()
yield "contact", result
yield "status", "created"
elif input_data.operation == "get":
# Search for contact by email
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "email",
"operator": "EQ",
"value": input_data.email,
}
]
}
]
}
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "contact", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "email",
"operator": "EQ",
"value": input_data.email,
}
]
}
]
},
)
contact_id = search_response.json().get("results", [{}])[0].get("id")
if contact_id:
response = requests.patch(
f"{base_url}/{contact_id}",
headers=headers,
json={"properties": input_data.contact_data},
)
result = response.json()
yield "contact", result
yield "status", "updated"
else:
yield "contact", {}
yield "status", "contact_not_found"
|
import os
import fsspec
import pytest
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
from typing import Optional
import pytest
import torch
from docarray import BaseDocument, DocumentArray, Text
from docarray.array.abstract_array import AnyDocumentArray
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDocument):
sub_sub_text: Text
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDocument):
sub_text: Text
sub_da: DocumentArray[SubSubDoc]
class MultiModalDoc(BaseDocument):
mm_text: Text
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocumentArray[SubDoc]
docs = DocumentArray[MultiModalDoc](
[
MultiModalDoc(
mm_text=Text(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=Text(text=f'sub_{i}_1'),
sub_da=DocumentArray[SubSubDoc](
[
SubSubDoc(
sub_sub_text=Text(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text.text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da.sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da.sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da.sub_da.sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.stack()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDocument()
input_list = [DocumentArray([doc, doc, doc])]
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
from typing import Optional
import pytest
import torch
from docarray import Document, DocumentArray, Text
from docarray.array.abstract_array import AnyDocumentArray
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(Document):
sub_sub_text: Text
sub_sub_tensor: TorchTensor[2]
class SubDoc(Document):
sub_text: Text
sub_da: DocumentArray[SubSubDoc]
class MultiModalDoc(Document):
mm_text: Text
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocumentArray[SubDoc]
docs = DocumentArray[MultiModalDoc](
[
MultiModalDoc(
mm_text=Text(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=Text(text=f'sub_{i}_1'),
sub_da=DocumentArray[SubSubDoc](
[
SubSubDoc(
sub_sub_text=Text(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text.text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da.sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da.sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da.sub_da.sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.stack()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = Document()
input_list = [DocumentArray([doc, doc, doc])]
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
import numpy as np
import pytest
from docarray.proto import DocProto, NodeProto
from docarray.typing import NdArray
@pytest.mark.proto
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf())
assert (tensor == original_ndarray).all()
@pytest.mark.proto
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
ndarray = NdArray._docarray_from_native(np.zeros((3, 224, 224)))
nd_proto = ndarray.to_protobuf()
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocProto(data=data)
|
import numpy as np
import pytest
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import NdArray
@pytest.mark.proto
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf())
assert (tensor == original_ndarray).all()
@pytest.mark.proto
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
ndarray = NdArray._docarray_from_native(np.zeros((3, 224, 224)))
nd_proto = ndarray.to_protobuf()
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
"""Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from collections import defaultdict
from time import time
import numpy as np
from scipy.linalg import svd
from sklearn.datasets import make_low_rank_matrix
from sklearn.utils.extmath import randomized_svd
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print("====================")
print("Iteration %03d of %03d" % (it, max_it))
print("====================")
X = make_low_rank_matrix(
n_samples, n_features, effective_rank=rank, tail_strength=0.2
)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results["scipy svd"].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results["scikit-learn randomized_svd (n_iter=0)"].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d " % n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results["scikit-learn randomized_svd (n_iter=%d)" % n_iter].append(
time() - tstart
)
return results
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d # register the 3d projection # noqa: F401
samples_range = np.linspace(2, 1000, 4).astype(int)
features_range = np.linspace(2, 1000, 4).astype(int)
results = compute_bench(samples_range, features_range)
label = "scikit-learn singular value decomposition benchmark results"
fig = plt.figure(label)
    ax = fig.add_subplot(projection="3d")  # Figure.gca no longer accepts a projection kwarg
for c, (label, timings) in zip("rbg", sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, color=c)
        # dummy point plot to stick the legend to since surface plots do not
        # support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel("n_samples")
ax.set_ylabel("n_features")
ax.set_zlabel("Time (s)")
ax.legend()
plt.show()
|
"""Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from collections import defaultdict
from time import time
import numpy as np
from scipy.linalg import svd
from sklearn.datasets import make_low_rank_matrix
from sklearn.utils.extmath import randomized_svd
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print("====================")
print("Iteration %03d of %03d" % (it, max_it))
print("====================")
X = make_low_rank_matrix(
n_samples, n_features, effective_rank=rank, tail_strength=0.2
)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results["scipy svd"].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results["scikit-learn randomized_svd (n_iter=0)"].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d " % n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results["scikit-learn randomized_svd (n_iter=%d)" % n_iter].append(
time() - tstart
)
return results
if __name__ == "__main__":
from mpl_toolkits.mplot3d import axes3d # noqa register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(int)
features_range = np.linspace(2, 1000, 4).astype(int)
results = compute_bench(samples_range, features_range)
label = "scikit-learn singular value decomposition benchmark results"
fig = plt.figure(label)
    ax = fig.add_subplot(projection="3d")  # Figure.gca no longer accepts a projection kwarg
for c, (label, timings) in zip("rbg", sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, color=c)
        # dummy point plot to stick the legend to since surface plots do not
        # support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel("n_samples")
ax.set_ylabel("n_features")
ax.set_zlabel("Time (s)")
ax.legend()
plt.show()
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model_reused: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model_reused
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Anchor Sparsity: Active Dimensions: 105.5, Sparsity Ratio: 0.9965
Model Positive Sparsity: Active Dimensions: 69.8, Sparsity Ratio: 0.9977
Model Negative Sparsity: Active Dimensions: 68.6, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model, ".")
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Sparsity Stats Query : Row Non-Zero Mean: 105.4530029296875, Row Sparsity Mean: 0.9965449571609497
Model Sparsity Stats Corpus : Row Non-Zero Mean: 69.18349838256836, Row Sparsity Mean: 0.9977333247661591
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import os
import shutil
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
_MIXTURE_FILES = {
"speech_separation": "mixture_3729-6852-0037_8463-287645-0000.wav",
"music_separation": "al_james_mixture_shorter.wav",
}
_CLEAN_FILES = {
"speech_separation": [
"s1_3729-6852-0037_8463-287645-0000.wav",
"s2_3729-6852-0037_8463-287645-0000.wav",
],
"music_separation": [
"al_james_drums_shorter.wav",
"al_james_bass_shorter.wav",
"al_james_other_shorter.wav",
"al_james_vocals_shorter.wav",
],
}
@pytest.fixture
def sample_speech(lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = torchaudio.utils.download_asset(f"test-assets/{filename}")
return path
@pytest.fixture
def mixture_source(task):
if task not in _MIXTURE_FILES:
raise NotImplementedError(f"Unexpected task: {task}")
path = torchaudio.utils.download_asset(f"test-assets/{_MIXTURE_FILES[task]}")
return path
@pytest.fixture
def clean_sources(task):
if task not in _CLEAN_FILES:
raise NotImplementedError(f"Unexpected task: {task}")
paths = []
for file in _CLEAN_FILES[task]:
path = torchaudio.utils.download_asset(f"test-assets/{file}")
paths.append(path)
return paths
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmp_path, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir", default=False):
yield
else:
org_dir = torch.hub.get_dir()
subdir = os.path.join(tmp_path, "hub")
torch.hub.set_dir(subdir)
yield
torch.hub.set_dir(org_dir)
shutil.rmtree(subdir, ignore_errors=True)
@pytest.fixture()
def emissions():
path = torchaudio.utils.download_asset("test-assets/emissions-8555-28447-0012.pt")
return torch.load(path)
|
import os
import shutil
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
_MIXTURE_FILES = {
"speech_separation": "mixture_3729-6852-0037_8463-287645-0000.wav",
"music_separation": "al_james_mixture_shorter.wav",
}
_CLEAN_FILES = {
"speech_separation": [
"s1_3729-6852-0037_8463-287645-0000.wav",
"s2_3729-6852-0037_8463-287645-0000.wav",
],
"music_separation": [
"al_james_drums_shorter.wav",
"al_james_bass_shorter.wav",
"al_james_other_shorter.wav",
"al_james_vocals_shorter.wav",
],
}
@pytest.fixture
def sample_speech(lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = torchaudio.utils.download_asset(f"test-assets/{filename}")
return path
@pytest.fixture
def mixture_source(task):
if task not in _MIXTURE_FILES:
raise NotImplementedError(f"Unexpected task: {task}")
path = torchaudio.utils.download_asset(f"test-assets/{_MIXTURE_FILES[task]}")
return path
@pytest.fixture
def clean_sources(task):
if task not in _CLEAN_FILES:
raise NotImplementedError(f"Unexpected task: {task}")
paths = []
for file in _CLEAN_FILES[task]:
path = torchaudio.utils.download_asset(f"test-assets/{file}")
paths.append(path)
return paths
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmp_path, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
subdir = os.path.join(tmp_path, "hub")
torch.hub.set_dir(subdir)
yield
torch.hub.set_dir(org_dir)
shutil.rmtree(subdir, ignore_errors=True)
@pytest.fixture()
def emissions():
path = torchaudio.utils.download_asset("test-assets/emissions-8555-28447-0012.pt")
return torch.load(path)
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Dict, Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average (EMA) on the model during
training.
Note:
- EMAHook takes priority over CheckpointHook.
- The original model parameters are actually saved in ema field after
train.
Args:
ema_type (str): The type of EMA strategy to use. You can find the
supported strategies in ``mmengine.model.averaged_model``.
Defaults to 'ExponentialMovingAverage'
"""
priority = 'NORMAL'
def __init__(self, ema_type: str = 'ExponentialMovingAverage', **kwargs):
self.ema_cfg = dict(type=ema_type, **kwargs)
def before_run(self, runner) -> None:
"""Create an ema copy of the model."""
model = runner.model
if is_model_wrapper(model):
model = model.module
self.src_model = model
self.ema_model = MODELS.build(
self.ema_cfg, default_args=dict(model=self.src_model))
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ema parameter."""
self.ema_model.update_parameters(self.src_model)
def before_val_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
validation."""
self._swap_ema_parameters()
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after
validation."""
self._swap_ema_parameters()
def before_test_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
test."""
self._swap_ema_parameters()
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after test."""
self._swap_ema_parameters()
def before_save_checkpoint(self, runner, checkpoint: dict) -> None:
"""Save ema parameters to checkpoint."""
checkpoint['ema_state_dict'] = self.ema_model.state_dict()
# Save ema parameters to the source model's state dict so that we can
# directly load the averaged model weights for deployment.
# Swapping the state_dict key-values instead of swapping model
# parameters because the state_dict is a shallow copy of model
# parameters.
self._swap_ema_state_dict(checkpoint)
def after_load_checkpoint(self, runner, checkpoint: dict) -> None:
"""Resume ema parameters from checkpoint."""
# The original model parameters are actually saved in ema field.
# swap the weights back to resume ema state.
self._swap_ema_state_dict(checkpoint)
self.ema_model.load_state_dict(checkpoint['ema_state_dict'])
def _swap_ema_parameters(self) -> None:
"""Swap the parameter of model with ema_model."""
avg_param = (
itertools.chain(self.ema_model.module.parameters(),
self.ema_model.module.buffers())
if self.ema_model.update_buffers else
self.ema_model.module.parameters())
src_param = (
itertools.chain(self.src_model.parameters(),
self.src_model.buffers())
if self.ema_model.update_buffers else self.src_model.parameters())
for p_avg, p_src in zip(avg_param, src_param):
tmp = p_avg.data.clone()
p_avg.data.copy_(p_src.data)
p_src.data.copy_(tmp)
def _swap_ema_state_dict(self, checkpoint):
"""Swap the state dict values of model with ema_model."""
model_state = checkpoint['state_dict']
ema_state = checkpoint['ema_state_dict']
for k in ema_state:
if k[:7] == 'module.':
tmp = ema_state[k]
ema_state[k] = model_state[k[7:]]
model_state[k[7:]] = tmp
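# Usage sketch (illustrative only): with an mmengine Runner the hook is normally enabled
# from the config; the momentum value below is an arbitrary example, not a recommendation.
EXAMPLE_EMA_HOOK_CFG = dict(type='EMAHook', ema_type='ExponentialMovingAverage', momentum=0.0002)
# e.g. ``custom_hooks = [EXAMPLE_EMA_HOOK_CFG]`` in a runner config file.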
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Dict, Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average (EMA) on the model during
training.
Note:
- EMAHook takes priority over CheckpointHook.
- The original model parameters are actually saved in ema field after
train.
Args:
ema_type (str): The type of EMA strategy to use. You can find the
supported strategies in ``mmengine.model.averaged_model``.
Defaults to 'ExponentialMovingAverage'
"""
priority = 'NORMAL'
def __init__(self, ema_type: str = 'ExponentialMovingAverage', **kwargs):
self.ema_cfg = dict(type=ema_type, **kwargs)
def before_run(self, runner) -> None:
"""Create an ema copy of the model."""
model = runner.model
if is_model_wrapper(model):
model = model.module
self.src_model = model
self.ema_model = MODELS.build(
self.ema_cfg, default_args=dict(model=self.src_model))
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ema parameter."""
self.ema_model.update_parameters(self.src_model)
def before_val_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
validation."""
self._swap_ema_parameters()
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after
validation."""
self._swap_ema_parameters()
def before_test_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
test."""
self._swap_ema_parameters()
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after test."""
self._swap_ema_parameters()
def before_save_checkpoint(self, runner, checkpoint: dict) -> None:
"""Save ema parameters to checkpoint."""
# save ema parameters to the source model's state dict so that we can
# directly load the averaged model weights for deployment.
self._swap_ema_parameters()
checkpoint['ema_state_dict'] = self.ema_model.state_dict()
self._swap_ema_parameters()
def after_load_checkpoint(self, runner, checkpoint: dict) -> None:
"""Resume ema parameters from checkpoint."""
self.ema_model.load_state_dict(checkpoint['ema_state_dict'])
# The original model parameters are actually saved in ema field.
# swap the weights back to resume ema state.
self._swap_ema_parameters()
def _swap_ema_parameters(self) -> None:
"""Swap the parameter of model with ema_model."""
avg_param = (
itertools.chain(self.ema_model.module.parameters(),
self.ema_model.module.buffers())
if self.ema_model.update_buffers else
self.ema_model.module.parameters())
src_param = (
itertools.chain(self.src_model.parameters(),
self.src_model.buffers())
if self.ema_model.update_buffers else self.src_model.parameters())
for p_avg, p_src in zip(avg_param, src_param):
tmp = p_avg.data.clone()
p_avg.data.copy_(p_src.data)
p_src.data.copy_(tmp)
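As a usage sketch (not taken from the file above), the hook is typically enabled through the runner configuration; any extra keyword arguments are packed into ``ema_cfg`` and forwarded to ``MODELS.build``. The ``momentum`` value here is purely illustrative.

# Hypothetical config snippet; 'momentum' is an assumed kwarg of the chosen
# EMA strategy, not a value documented in this file.
custom_hooks = [
    dict(type='EMAHook', ema_type='ExponentialMovingAverage', momentum=0.0002)
]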
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
"""Inputs for Polygon's Financials API"""
query: str
class PolygonFinancials(BaseTool):
"""Tool that gets the financials of a ticker from Polygon"""
mode: str = "get_financials"
name: str = "polygon_financials"
description: str = (
"A wrapper around Polygon's Stock Financials API. "
"This tool is useful for fetching fundamental financials from "
"balance sheets, income statements, and cash flow statements "
"for a stock ticker. The input should be the ticker that you want "
"to get the latest fundamental financial data for."
)
args_schema: Type[BaseModel] = Inputs
api_wrapper: PolygonAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Polygon API tool."""
return self.api_wrapper.run(self.mode, ticker=query)
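A hedged usage sketch for the tool above; it assumes a valid Polygon API key is available to ``PolygonAPIWrapper`` (for example via the environment) and uses "AAPL" as an illustrative ticker.

# Hypothetical usage; the ticker and API-key setup are placeholders.
from langchain_community.utilities.polygon import PolygonAPIWrapper

financials_tool = PolygonFinancials(api_wrapper=PolygonAPIWrapper())
print(financials_tool.run("AAPL"))  # returns the latest fundamentals as text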
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
"""Inputs for Polygon's Financials API"""
query: str
class PolygonFinancials(BaseTool): # type: ignore[override, override]
"""Tool that gets the financials of a ticker from Polygon"""
mode: str = "get_financials"
name: str = "polygon_financials"
description: str = (
"A wrapper around Polygon's Stock Financials API. "
"This tool is useful for fetching fundamental financials from "
"balance sheets, income statements, and cash flow statements "
"for a stock ticker. The input should be the ticker that you want "
"to get the latest fundamental financial data for."
)
args_schema: Type[BaseModel] = Inputs
api_wrapper: PolygonAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Polygon API tool."""
return self.api_wrapper.run(self.mode, ticker=query)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
train_data_file = os.path.join(os.environ['TEST_WORKSPACE'], 'train.npy')
train_data = np.array(np.random.random([1024, 10]), dtype=np.float32)
np.save(train_data_file, train_data)
trained_index_file = os.path.join(os.environ['TEST_WORKSPACE'], 'faiss.index')
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
},
)
with f:
# the trained index will be dumped to "faiss.index"
f.post(on='/train', parameters={'train_data_file': train_data_file})
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
'dump_path': dump_path,
},
)
with f:
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
train_data_file = os.path.join(os.environ['TEST_WORKSPACE'], 'train.npy')
train_data = np.array(np.random.random([1024, 10]), dtype=np.float32)
np.save(train_data_file, train_data)
trained_index_file = os.path.join(os.environ['TEST_WORKSPACE'], 'faiss.index')
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
},
)
with f:
# the trained index will be dumped to "faiss.index"
f.post(on='/train', parameters={'train_data_file': train_data_file})
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
'dump_path': dump_path,
},
)
with f:
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['euclidean'].value
<= d.matches[1].scores['euclidean'].value
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_bit import *
from .image_processing_bit import *
from .image_processing_bit_fast import *
from .modeling_bit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_bit import *
from .image_processing_bit import *
from .modeling_bit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
    It is quite similar to the FCOS head, except for the searched structure of
    the classification and bbox regression branches, where a structure of
    "dconv3x3, conv3x3, dconv3x3, conv1x1" is used instead.
"""
def __init__(self, *args, init_cfg=None, **kwargs):
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
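A possible config-side sketch for instantiating the head above; the type name matches the ``@HEADS.register_module()`` registration, while the channel numbers, class count and strides are illustrative assumptions, not values taken from this file.

# Hypothetical mmdet config fragment.
bbox_head = dict(
    type='NASFCOSHead',
    num_classes=80,
    in_channels=256,
    feat_channels=256,
    strides=[8, 16, 32, 64, 128])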
|
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
    It is quite similar to the FCOS head, except for the searched structure of
    the classification and bbox regression branches, where a structure of
    "dconv3x3, conv3x3, dconv3x3, conv1x1" is used instead.
"""
def __init__(self, *args, init_cfg=None, **kwargs):
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
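A small usage sketch for the evaluator above; the import path, model name and sentence pairs are assumptions made for illustration only.

# Hypothetical usage of SparseTranslationEvaluator.
from sentence_transformers.sparse_encoder import SparseEncoder

model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseTranslationEvaluator(
    source_sentences=["A cat sits on the mat."],
    target_sentences=["Eine Katze sitzt auf der Matte."],
    name="en-de",
)
metrics = evaluator(model)
print(metrics)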
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import time
from contextlib import contextmanager
from typing import Generator, Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
    Consider the case of resetting the current ``Registry`` by ``default_scope``
    in an internal module that cannot access the runner directly; it is
    difficult to get the ``default_scope`` defined in ``Runner``. However, if
    ``Runner`` creates a ``DefaultScope`` instance with the given
    ``default_scope``, the internal module can get ``default_scope`` by
    ``DefaultScope.get_current_instance`` everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
        Since ``default_scope`` is an optional argument for ``Registry.build``,
        ``get_current_instance`` should return ``None`` if there is no
        ``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
            Optional[DefaultScope]: Return None if no ``DefaultScope``
                instance has been created yet; otherwise return the latest
                created DefaultScope instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super().get_current_instance()
else:
instance = None
_release_lock()
return instance
@classmethod
@contextmanager
def overwrite_default_scope(cls, scope_name: Optional[str]) -> Generator:
"""overwrite the current default scope with `scope_name`"""
if scope_name is None:
yield
else:
tmp = copy.deepcopy(cls._instance_dict)
cls.get_instance(f'overwrite-{time.time()}', scope_name=scope_name)
try:
yield
finally:
cls._instance_dict = tmp
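A brief sketch of how the scope machinery above is typically exercised; the instance and scope names are arbitrary examples.

# Hypothetical usage of DefaultScope and overwrite_default_scope.
DefaultScope.get_instance('task', scope_name='mmdet')
print(DefaultScope.get_current_instance().scope_name)  # -> 'mmdet'

with DefaultScope.overwrite_default_scope('mmcls'):
    # Inside the context, the most recently created scope wins.
    print(DefaultScope.get_current_instance().scope_name)  # -> 'mmcls'
# On exit, the previous instance dict is restored.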
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
    Consider the case of resetting the current ``Registry`` by ``default_scope``
    in an internal module that cannot access the runner directly; it is
    difficult to get the ``default_scope`` defined in ``Runner``. However, if
    ``Runner`` creates a ``DefaultScope`` instance with the given
    ``default_scope``, the internal module can get ``default_scope`` by
    ``DefaultScope.get_current_instance`` everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
        Since ``default_scope`` is an optional argument for ``Registry.build``,
        ``get_current_instance`` should return ``None`` if there is no
        ``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
            Optional[DefaultScope]: Return None if no ``DefaultScope``
                instance has been created yet; otherwise return the latest
                created DefaultScope instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super().get_current_instance()
else:
instance = None
_release_lock()
return instance
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
]
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import GoogleTranslateTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleTranslateTransformer": "langchain_community.document_transformers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleTranslateTransformer",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import GoogleTranslateTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleTranslateTransformer": "langchain_community.document_transformers"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleTranslateTransformer",
]
|
from typing import Optional
from opentelemetry.context.context import Context
from jina import DocumentArray, Executor, requests
class ExecutorTestWithTracing(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.meter:
self.request_counter = self.meter.create_counter('request_counter')
else:
self.request_counter = None
@requests
def testing(
self, docs: DocumentArray, tracing_context: Optional[Context], **kwargs
):
if self.request_counter:
self.request_counter.add(1)
if self.tracer:
with self.tracer.start_span('dummy', context=tracing_context) as span:
span.set_attribute('len_docs', len(docs))
return docs
|
from typing import Optional
from opentelemetry.context.context import Context
from jina import Executor, requests, DocumentArray
class ExecutorTestWithTracing(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.meter:
self.request_counter = self.meter.create_counter('request_counter')
else:
self.request_counter = None
@requests
def testing(
self, docs: 'DocumentArray', tracing_context: Optional[Context], **kwargs
):
if self.request_counter:
self.request_counter.add(1)
if self.tracer:
with self.tracer.start_span('dummy', context=tracing_context) as span:
span.set_attribute('len_docs', len(docs))
return docs
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .mot_error_visualize import imshow_mot_errors
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import (register_all_modules, setup_cache_size_limit_of_dynamo,
setup_multi_processes)
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg',
'setup_cache_size_limit_of_dynamo', 'imshow_mot_errors'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import (register_all_modules, setup_cache_size_limit_of_dynamo,
setup_multi_processes)
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg',
'setup_cache_size_limit_of_dynamo'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
|
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './ms-rcnn_r101-caffe_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
import csv
import logging
import os
from typing import TYPE_CHECKING, Dict
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
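A hedged end-to-end sketch for the evaluator above; the model name, the toy example and the loss construction are placeholders chosen for illustration.

# Hypothetical usage of LabelAccuracyEvaluator with a SoftmaxLoss head.
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, SentenceTransformer, losses

model = SentenceTransformer("all-MiniLM-L6-v2")
softmax_loss = losses.SoftmaxLoss(
    model,
    sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
    num_labels=2,
)
examples = [InputExample(texts=["A cat.", "A feline."], label=1)]
dataloader = DataLoader(examples, batch_size=2)
evaluator = LabelAccuracyEvaluator(dataloader, name="dev", softmax_model=softmax_loss)
metrics = evaluator(model)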
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
:param dataloader:
the data for the evaluation
"""
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
def __call__(self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
return accuracy
|
import pytest
from langchain_core.documents import Document
from langchain_core.indexing.api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert isinstance(hashed_document.hash_, str)
def test_hashing_with_missing_content() -> None:
"""Check that ValueError is raised if page_content is missing."""
with pytest.raises(TypeError):
_HashedDocument(
metadata={"key": "value"},
) # type: ignore[call-arg]
def test_uid_auto_assigned_to_hash() -> None:
"""Test uid is auto-assigned to the hashed_document hash."""
hashed_document = _HashedDocument( # type: ignore[call-arg]
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert hashed_document.uid == hashed_document.hash_
def test_to_document() -> None:
"""Test to_document method."""
hashed_document = _HashedDocument( # type: ignore[call-arg]
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
doc = hashed_document.to_document()
assert isinstance(doc, Document)
assert doc.page_content == "Lorem ipsum dolor sit amet"
assert doc.metadata == {"key": "value"}
def test_from_document() -> None:
"""Test from document class method."""
document = Document(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_document = _HashedDocument.from_document(document)
# hash should be deterministic
assert hashed_document.hash_ == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
assert hashed_document.uid == hashed_document.hash_
|
import pytest
from langchain_core.documents import Document
from langchain_core.indexing.api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert isinstance(hashed_document.hash_, str)
def test_hashing_with_missing_content() -> None:
"""Check that ValueError is raised if page_content is missing."""
with pytest.raises(TypeError):
_HashedDocument(
metadata={"key": "value"},
) # type: ignore
def test_uid_auto_assigned_to_hash() -> None:
"""Test uid is auto-assigned to the hashed_document hash."""
hashed_document = _HashedDocument( # type: ignore[call-arg]
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert hashed_document.uid == hashed_document.hash_
def test_to_document() -> None:
"""Test to_document method."""
hashed_document = _HashedDocument( # type: ignore[call-arg]
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
doc = hashed_document.to_document()
assert isinstance(doc, Document)
assert doc.page_content == "Lorem ipsum dolor sit amet"
assert doc.metadata == {"key": "value"}
def test_from_document() -> None:
"""Test from document class method."""
document = Document(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_document = _HashedDocument.from_document(document)
# hash should be deterministic
assert hashed_document.hash_ == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
assert hashed_document.uid == hashed_document.hash_
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
SAMPLE_RATE = 16000
class FluentSpeechCommands(Dataset):
"""*Fluent Speech Commands* :cite:`fluent` dataset
Args:
        root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use.
Options: [``"train"``, ``"valid"``, ``"test"``].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
if subset not in ["train", "valid", "test"]:
raise ValueError("`subset` must be one of ['train', 'valid', 'test']")
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def get_metadata(self, n: int) -> Tuple[str, int, str, int, str, str, str, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
str:
Path to audio
int:
Sample rate
str:
File name
int:
Speaker ID
str:
Transcription
str:
Action
str:
Object
str:
Location
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
file_path = os.path.join("wavs", "speakers", speaker_id, f"{file_name}.wav")
return file_path, SAMPLE_RATE, file_name, speaker_id, transcription, action, obj, location
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, str, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
Tensor:
Waveform
int:
Sample rate
str:
File name
int:
Speaker ID
str:
Transcription
str:
Action
str:
Object
str:
Location
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
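A short usage sketch for the dataset above; the root path is a placeholder and must contain the extracted ``fluent_speech_commands_dataset`` directory.

# Hypothetical usage; replace the root path with a real dataset location.
dataset = FluentSpeechCommands("/path/to/root", subset="valid")
waveform, sample_rate, file_name, speaker_id, transcription, action, obj, location = dataset[0]
print(sample_rate, transcription, action, obj, location)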
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
SAMPLE_RATE = 16000
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* :cite:`fluent` Dataset
Args:
        root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
if subset not in ["train", "valid", "test"]:
raise ValueError("`subset` must be one of ['train', 'valid', 'test']")
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def get_metadata(self, n: int) -> Tuple[str, int, str, int, str, str, str, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): The index of the sample to be loaded
Returns:
(str, int, str, int, str, str, str, str):
``(filepath, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
file_path = os.path.join("wavs", "speakers", speaker_id, f"{file_name}.wav")
return file_path, SAMPLE_RATE, file_name, speaker_id, transcription, action, obj, location
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, str, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, str, str, str, str):
``(waveform, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
|
"""Argparser module for the export API"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def set_export_parser(parser=None):
"""Set the parser for exporting
    :param parser: the parser to configure
:return: the parser
"""
if not parser:
parser = set_base_parser()
spp = parser.add_subparsers(
dest='export',
description='use `%(prog)-8s [sub-command] --help` '
'to get detailed information about each sub-command',
required=True,
)
set_export_flowchart_parser(
spp.add_parser(
'flowchart',
help='Export a Flow YAML file to a flowchart',
formatter_class=_chf,
)
)
set_export_k8s_parser(
spp.add_parser(
'kubernetes',
help='Export a Flow YAML file to a Kubernetes YAML bundle',
formatter_class=_chf,
)
)
set_export_docker_compose_parser(
spp.add_parser(
'docker-compose',
help='Export a Flow YAML file to a Docker Compose YAML file',
formatter_class=_chf,
)
)
set_export_schema_parser(
spp.add_parser(
'schema',
help='Export Jina Executor & Flow API to JSONSchema files',
formatter_class=_chf,
)
)
return parser
def mixin_base_io_parser(parser):
"""Add basic IO parsing args
    :param parser: the parser to configure
"""
parser.add_argument(
'config_path',
type=str,
metavar='INPUT',
help='The input file path of a Flow or Deployment YAML ',
)
parser.add_argument(
'outpath',
type=str,
metavar='OUTPUT',
help='The output path',
)
def set_export_docker_compose_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--network_name',
type=str,
        help='The name of the network that will be used by the deployment.',
)
return parser
def set_export_k8s_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--k8s-namespace',
type=str,
help='The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.',
)
return parser
def set_export_flowchart_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--vertical-layout',
action='store_true',
default=False,
        help='If set, then the flowchart is rendered vertically from top to bottom.',
)
return parser
def set_export_schema_parser(parser=None):
"""Set the parser for the API export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'--yaml-path',
type=str,
nargs='*',
metavar='PATH',
help='The YAML file path for storing the exported API',
)
parser.add_argument(
'--json-path',
type=str,
nargs='*',
metavar='PATH',
help='The JSON file path for storing the exported API',
)
parser.add_argument(
'--schema-path',
type=str,
nargs='*',
metavar='PATH',
help='The JSONSchema file path for storing the exported API',
)
return parser
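A small sketch (not part of the module above) showing how the assembled parser could be exercised programmatically; the file paths and namespace are placeholder values.

# Hypothetical invocation of the export parser.
parser = set_export_parser()
args = parser.parse_args(
    ['kubernetes', 'flow.yml', './k8s', '--k8s-namespace', 'demo'])
print(args.config_path, args.outpath, args.k8s_namespace)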
|
"""Argparser module for the export API"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def set_export_parser(parser=None):
"""Set the parser for exporting
    :param parser: the parser to configure
:return: the parser
"""
if not parser:
parser = set_base_parser()
spp = parser.add_subparsers(
dest='export',
description='use `%(prog)-8s [sub-command] --help` '
'to get detailed information about each sub-command',
required=True,
)
set_export_flowchart_parser(
spp.add_parser(
'flowchart',
help='Export a Flow YAML file to a flowchart',
formatter_class=_chf,
)
)
set_export_k8s_parser(
spp.add_parser(
'kubernetes',
help='Export a Flow YAML file to a Kubernetes YAML bundle',
formatter_class=_chf,
)
)
set_export_docker_compose_parser(
spp.add_parser(
'docker-compose',
help='Export a Flow YAML file to a Docker Compose YAML file',
formatter_class=_chf,
)
)
set_export_schema_parser(
spp.add_parser(
'schema',
help='Export Jina Executor & Flow API to JSONSchema files',
formatter_class=_chf,
)
)
return parser
def mixin_base_io_parser(parser):
"""Add basic IO parsing args
    :param parser: the parser to configure
"""
parser.add_argument(
'config_path', type=str, metavar='INPUT', help='The input file path of a Flow or Deployment YAML '
)
parser.add_argument(
'outpath',
type=str,
metavar='OUTPUT',
help='The output path',
)
def set_export_docker_compose_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--network_name',
type=str,
        help='The name of the network that will be used by the deployment.',
)
return parser
def set_export_k8s_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--k8s-namespace',
type=str,
help='The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.',
)
return parser
def set_export_flowchart_parser(parser=None):
"""Set the parser for the flow chart export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_base_io_parser(parser)
parser.add_argument(
'--vertical-layout',
action='store_true',
default=False,
        help='If set, the flowchart is rendered vertically from top to bottom.',
)
return parser
def set_export_schema_parser(parser=None):
"""Set the parser for the API export
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'--yaml-path',
type=str,
nargs='*',
metavar='PATH',
help='The YAML file path for storing the exported API',
)
parser.add_argument(
'--json-path',
type=str,
nargs='*',
metavar='PATH',
help='The JSON file path for storing the exported API',
)
parser.add_argument(
'--schema-path',
type=str,
nargs='*',
metavar='PATH',
help='The JSONSchema file path for storing the exported API',
)
return parser
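# Hedged usage sketch (not part of the original module): assuming set_base_parser()
# returns a standard argparse.ArgumentParser, the export parser built above can be
# exercised like this to see how the sub-commands and positional IO args compose.
if __name__ == '__main__':
    _parser = set_export_parser()
    _args = _parser.parse_args(
        ['flowchart', 'flow.yml', 'flow.svg', '--vertical-layout']
    )
    # Expected under the assumption above:
    #   _args.export == 'flowchart'
    #   _args.config_path == 'flow.yml'
    #   _args.outpath == 'flow.svg'
    #   _args.vertical_layout is True
    print(_args)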
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from simpleranker import SimpleRanker
@pytest.mark.parametrize('traversal_paths', [['r'], ['c']])
@pytest.mark.parametrize('ranking', ['min', 'max'])
def test_ranking(documents_chunk, documents_chunk_chunk, traversal_paths, ranking):
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
traversal_paths=traversal_paths,
)
if traversal_paths == ['r']:
ranking_docs = documents_chunk
else:
ranking_docs = documents_chunk_chunk
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
if ranking == 'min':
assert (
match.scores['cosine'].value
<= doc.matches[i + 1].scores['cosine'].value
)
else:
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
@pytest.mark.parametrize('ranking', ['mean_min', 'mean_max'])
def test_mean_ranking(documents_chunk, ranking):
traversal_paths = ['r']
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
traversal_paths=traversal_paths,
)
ranking_docs = documents_chunk
mean_scores = []
for doc in ranking_docs[0].chunks:
scores = []
for match in doc.matches:
scores.append(match.scores['cosine'].value)
        mean_scores.append(sum(scores) / len(scores))
mean_scores.sort(reverse=ranking == 'mean_max')
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert match.scores['cosine'].value == pytest.approx(mean_scores[i], 1e-5)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from simpleranker import SimpleRanker
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
@pytest.mark.parametrize('ranking', ['min', 'max'])
def test_ranking(
documents_chunk, documents_chunk_chunk, default_traversal_paths, ranking
):
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
default_traversal_paths=default_traversal_paths,
)
if default_traversal_paths == ['r']:
ranking_docs = documents_chunk
else:
ranking_docs = documents_chunk_chunk
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
if ranking == 'min':
assert (
match.scores['cosine'].value
<= doc.matches[i + 1].scores['cosine'].value
)
else:
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
@pytest.mark.parametrize('ranking', ['mean_min', 'mean_max'])
def test_mean_ranking(documents_chunk, ranking):
default_traversal_paths = ['r']
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
default_traversal_paths=default_traversal_paths,
)
ranking_docs = documents_chunk
mean_scores = []
for doc in ranking_docs[0].chunks:
scores = []
for match in doc.matches:
scores.append(match.scores['cosine'].value)
        mean_scores.append(sum(scores) / len(scores))
mean_scores.sort(reverse=ranking == 'mean_max')
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert match.scores['cosine'].value == pytest.approx(mean_scores[i], 1e-5)
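# Hedged sketch (not the actual SimpleRanker implementation): the invariant both
# test variants above assert is that each doc's matches end up sorted by their
# 'cosine' score, ascending for 'min'/'mean_min' and descending for 'max'/'mean_max'.
# Roughly equivalent standalone logic:
def _sort_matches_by_score(matches, metric='cosine', descending=False):
    """Return the matches ordered by their score value for the given metric."""
    return sorted(
        matches,
        key=lambda m: m.scores[metric].value,
        reverse=descending,
    )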
|
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Literal, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.
PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable. Defaults to True."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "schema", "prompt"].
"""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
"""String prompt value."""
text: str
"""Prompt text."""
type: Literal["StringPromptValue"] = "StringPromptValue"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "base"].
"""
return ["langchain", "prompts", "base"]
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
"""Chat prompt value.
A type of a prompt value that is built from messages.
"""
messages: Sequence[BaseMessage]
"""List of messages."""
def to_string(self) -> str:
"""Return prompt as string."""
return get_buffer_string(self.messages)
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of messages."""
return list(self.messages)
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "chat"].
"""
return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
"""Image URL."""
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image. Defaults to "auto".
Can be "auto", "low", or "high"."""
url: str
"""Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
"""Image prompt value."""
image_url: ImageURL
"""Image URL."""
type: Literal["ImagePromptValue"] = "ImagePromptValue"
def to_string(self) -> str:
"""Return prompt (image URL) as string."""
return self.image_url["url"]
def to_messages(self) -> list[BaseMessage]:
"""Return prompt (image URL) as messages."""
return [HumanMessage(content=[cast("dict", self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
"""Chat prompt value which explicitly lists out the message types it accepts.
For use in external schemas.
"""
messages: Sequence[AnyMessage]
"""Sequence of messages."""
type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "chat"].
"""
return ["langchain", "prompts", "chat"]
|
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Literal, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.
PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable. Defaults to True."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "schema", "prompt"].
"""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
"""String prompt value."""
text: str
"""Prompt text."""
type: Literal["StringPromptValue"] = "StringPromptValue"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "base"].
"""
return ["langchain", "prompts", "base"]
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
"""Chat prompt value.
A type of a prompt value that is built from messages.
"""
messages: Sequence[BaseMessage]
"""List of messages."""
def to_string(self) -> str:
"""Return prompt as string."""
return get_buffer_string(self.messages)
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of messages."""
return list(self.messages)
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "chat"].
"""
return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
"""Image URL."""
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image. Defaults to "auto".
Can be "auto", "low", or "high"."""
url: str
"""Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
"""Image prompt value."""
image_url: ImageURL
"""Image URL."""
type: Literal["ImagePromptValue"] = "ImagePromptValue"
def to_string(self) -> str:
"""Return prompt (image URL) as string."""
return self.image_url["url"]
def to_messages(self) -> list[BaseMessage]:
"""Return prompt (image URL) as messages."""
return [HumanMessage(content=[cast(dict, self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
"""Chat prompt value which explicitly lists out the message types it accepts.
For use in external schemas.
"""
messages: Sequence[AnyMessage]
"""Sequence of messages."""
type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "chat"].
"""
return ["langchain", "prompts", "chat"]
|
import logging
from typing import Any
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.data.execution import ExecutionEventType
from backend.executor import utils as execution_utils
event_bus = execution_utils.get_execution_event_bus()
graph_exec = execution_utils.add_graph_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
inputs=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
user_id=graph_exec.user_id,
graph_id=graph_exec.graph_id,
graph_exec_id=graph_exec.id,
):
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
logger.debug(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.debug(
f"Execution {log_id} produced {output_name}: {output_data}"
)
yield output_name, output_data
|
import logging
from typing import Any
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.data.execution import ExecutionEventType
from backend.executor import utils as execution_utils
event_bus = execution_utils.get_execution_event_bus()
graph_exec = execution_utils.add_graph_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
user_id=graph_exec.user_id,
graph_id=graph_exec.graph_id,
graph_exec_id=graph_exec.graph_exec_id,
):
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
logger.debug(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.debug(
f"Execution {log_id} produced {output_name}: {output_data}"
)
yield output_name, output_data
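# Hedged standalone illustration (does not import backend.*): the check in
# get_missing_input above boils down to "required schema fields minus the keys
# actually provided". The names below are made up for the example.
_example_input_schema = {"required": ["graph_id", "user_id"]}
_example_data = {"graph_id": "g-123", "input_schema": _example_input_schema}
_missing = set(_example_input_schema.get("required", [])) - set(_example_data)
assert _missing == {"user_id"}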
|