input | output
---|---
r'''
FX is a toolkit for developers to use to transform ``nn.Module``
instances. FX consists of three main components: a **symbolic tracer,**
an **intermediate representation**, and **Python code generation**. A
demonstration of these components in action:
::
import torch
# Simple module for demonstration
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
module = MyModule()
from torch.fx import symbolic_trace
# Symbolic tracing frontend - captures the semantics of the module
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
# High-level intermediate representation (IR) - Graph representation
print(symbolic_traced.graph)
"""
graph():
%x : [num_users=1] = placeholder[target=x]
%param : [num_users=1] = get_attr[target=param]
%add : [num_users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
%linear : [num_users=1] = call_module[target=linear](args = (%add,), kwargs = {})
%clamp : [num_users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
return clamp
"""
# Code generation - valid Python code
print(symbolic_traced.code)
"""
def forward(self, x):
param = self.param
add = x + param; x = param = None
linear = self.linear(add); add = None
clamp = linear.clamp(min = 0.0, max = 1.0); linear = None
return clamp
"""
The **symbolic tracer** performs "symbolic execution" of the Python
code. It feeds fake values, called Proxies, through the code. Operations
on these Proxies are recorded. More information about symbolic tracing
can be found in the :func:`symbolic_trace` and :class:`Tracer`
documentation.
The **intermediate representation** is the container for the operations
that were recorded during symbolic tracing. It consists of a list of
Nodes that represent function inputs, callsites (to functions, methods,
or :class:`torch.nn.Module` instances), and return values. More information
about the IR can be found in the documentation for :class:`Graph`. The
IR is the format on which transformations are applied.
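A transform can work directly on this list of Nodes. A minimal sketch, reusing the
``symbolic_traced`` module from the demonstration above:
::
    for node in symbolic_traced.graph.nodes:
        # every Node records an opcode, a target, and its input arguments
        print(node.op, node.target, node.args)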
**Python code generation** is what makes FX a Python-to-Python (or
Module-to-Module) transformation toolkit. For each Graph IR, we can
create valid Python code matching the Graph's semantics. This
functionality is wrapped up in :class:`GraphModule`, which is a
:class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
``forward`` method generated from the Graph.
Taken together, this pipeline of components (symbolic tracing ->
intermediate representation -> transforms -> Python code generation)
constitutes the Python-to-Python transformation pipeline of FX. In
addition, these components can be used separately. For example,
symbolic tracing can be used in isolation to capture a form of
the code for analysis (and not transformation) purposes. Code
generation can be used for programmatically generating models, for
example from a config file. There are many uses for FX!
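As an illustrative sketch (one possible way to write such a pass, not an official
recipe), a transform that rewrites every traced ``operator.add`` call into
``torch.mul`` could look like:
::
    import operator
    import torch
    from torch.fx import symbolic_trace
    def swap_add_for_mul(m: torch.nn.Module) -> torch.fx.GraphModule:
        gm = symbolic_trace(m)
        for node in gm.graph.nodes:
            if node.op == 'call_function' and node.target is operator.add:
                node.target = torch.mul
        gm.recompile()
        return gm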
Several example transformations can be found at the
`examples <https://github.com/pytorch/examples/tree/master/fx>`__
repository.
'''
from torch.fx import immutable_collections
from torch.fx._symbolic_trace import ( # noqa: F401
PH,
ProxyableClassMeta,
symbolic_trace,
Tracer,
wrap,
)
from torch.fx.graph import CodeGen, Graph # noqa: F401
from torch.fx.graph_module import GraphModule
from torch.fx.interpreter import Interpreter, Transformer
from torch.fx.node import has_side_effect, map_arg, Node
from torch.fx.proxy import Proxy
from torch.fx.subgraph_rewriter import replace_pattern
__all__ = [
"symbolic_trace",
"Tracer",
"wrap",
"Graph",
"GraphModule",
"Interpreter",
"Transformer",
"Node",
"Proxy",
"replace_pattern",
"has_side_effect",
"map_arg",
]
|
r'''
FX is a toolkit for developers to use to transform ``nn.Module``
instances. FX consists of three main components: a **symbolic tracer,**
an **intermediate representation**, and **Python code generation**. A
demonstration of these components in action:
::
import torch
# Simple module for demonstration
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
module = MyModule()
from torch.fx import symbolic_trace
# Symbolic tracing frontend - captures the semantics of the module
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
# High-level intermediate representation (IR) - Graph representation
print(symbolic_traced.graph)
"""
graph():
%x : [num_users=1] = placeholder[target=x]
%param : [num_users=1] = get_attr[target=param]
%add : [num_users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
%linear : [num_users=1] = call_module[target=linear](args = (%add,), kwargs = {})
%clamp : [num_users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
return clamp
"""
# Code generation - valid Python code
print(symbolic_traced.code)
"""
def forward(self, x):
param = self.param
add = x + param; x = param = None
linear = self.linear(add); add = None
clamp = linear.clamp(min = 0.0, max = 1.0); linear = None
return clamp
"""
The **symbolic tracer** performs "symbolic execution" of the Python
code. It feeds fake values, called Proxies, through the code. Operations
on these Proxies are recorded. More information about symbolic tracing
can be found in the :func:`symbolic_trace` and :class:`Tracer`
documentation.
The **intermediate representation** is the container for the operations
that were recorded during symbolic tracing. It consists of a list of
Nodes that represent function inputs, callsites (to functions, methods,
or :class:`torch.nn.Module` instances), and return values. More information
about the IR can be found in the documentation for :class:`Graph`. The
IR is the format on which transformations are applied.
**Python code generation** is what makes FX a Python-to-Python (or
Module-to-Module) transformation toolkit. For each Graph IR, we can
create valid Python code matching the Graph's semantics. This
functionality is wrapped up in :class:`GraphModule`, which is a
:class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
``forward`` method generated from the Graph.
Taken together, this pipeline of components (symbolic tracing ->
intermediate representation -> transforms -> Python code generation)
constitutes the Python-to-Python transformation pipeline of FX. In
addition, these components can be used separately. For example,
symbolic tracing can be used in isolation to capture a form of
the code for analysis (and not transformation) purposes. Code
generation can be used for programmatically generating models, for
example from a config file. There are many uses for FX!
Several example transformations can be found at the
`examples <https://github.com/pytorch/examples/tree/master/fx>`__
repository.
'''
from torch.fx import immutable_collections
from torch.fx._symbolic_trace import ( # noqa: F401
PH,
ProxyableClassMeta,
symbolic_trace,
Tracer,
wrap,
)
from torch.fx.graph import CodeGen, Graph # noqa: F401
from torch.fx.graph_module import GraphModule
from torch.fx.interpreter import Interpreter, Transformer
from torch.fx.node import has_side_effect, map_arg, Node
from torch.fx.proxy import Proxy
from torch.fx.subgraph_rewriter import replace_pattern
__all__ = [
"symbolic_trace",
"Tracer",
"wrap",
"Graph",
"GraphModule",
"Interpreter",
"Transformer",
"Node",
"Proxy",
"replace_pattern",
"has_side_effect",
"map_arg",
]
|
_base_ = './mask-rcnn_hrnetv2p-w32-1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__ = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute().parent
dll_path = [curr_path,
curr_path.parents[1],
curr_path / 'compile',
curr_path.parent / 'compile',
curr_path.parents[1] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parent / 'compile' / 'Release')
dll_path.append(curr_path.parent / 'compile' / 'windows' / 'x64' / 'DLL')
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
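# Hedged usage sketch (not part of the original module): when run directly, print
# whichever lib_lightgbm shared libraries were discovered; note that find_lib_path()
# raises an Exception if none of the candidate paths contain a library file.
if __name__ == "__main__":
    for path in find_lib_path():
        print(path)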
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute().parent
dll_path = [curr_path,
curr_path.parents[1],
curr_path / 'compile',
curr_path.parent / 'compile',
curr_path.parents[1] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parent / 'compile' / 'Release')
dll_path.append(curr_path.parent / 'compile' / 'windows' / 'x64' / 'DLL')
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CentripetalHead
class TestCentripetalHead(TestCase):
def test_centripetal_head_loss(self):
"""Tests corner head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}]
centripetal_head = CentripetalHead(
num_classes=4, in_channels=1, corner_emb_channels=0)
# Corner head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // 4, s // 4)
for _ in range(centripetal_head.num_feat_levels)
]
forward_outputs = centripetal_head.forward(feat)
# Test that empty ground truth encourages the network
# to predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
gt_bboxes_ignore = None
empty_gt_losses = centripetal_head.loss_by_feat(
*forward_outputs, [gt_instances], img_metas, gt_bboxes_ignore)
empty_det_loss = sum(empty_gt_losses['det_loss'])
empty_guiding_loss = sum(empty_gt_losses['guiding_loss'])
empty_centripetal_loss = sum(empty_gt_losses['centripetal_loss'])
empty_off_loss = sum(empty_gt_losses['off_loss'])
self.assertTrue(empty_det_loss.item() > 0,
'det loss should be non-zero')
self.assertTrue(
empty_guiding_loss.item() == 0,
'there should be no guiding loss when there are no true boxes')
self.assertTrue(
empty_centripetal_loss.item() == 0,
'there should be no centripetal loss when there are no true boxes')
self.assertTrue(
empty_off_loss.item() == 0,
'there should be no box loss when there are no true boxes')
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874],
[123.6667, 123.8757, 138.6326, 251.8874]])
gt_instances.labels = torch.LongTensor([2, 3])
two_gt_losses = centripetal_head.loss_by_feat(*forward_outputs,
[gt_instances],
img_metas,
gt_bboxes_ignore)
twogt_det_loss = sum(two_gt_losses['det_loss'])
twogt_guiding_loss = sum(two_gt_losses['guiding_loss'])
twogt_centripetal_loss = sum(two_gt_losses['centripetal_loss'])
twogt_off_loss = sum(two_gt_losses['off_loss'])
assert twogt_det_loss.item() > 0, 'det loss should be non-zero'
assert twogt_guiding_loss.item() > 0, 'push loss should be non-zero'
assert twogt_centripetal_loss.item(
) > 0, 'pull loss should be non-zero'
assert twogt_off_loss.item() > 0, 'off loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import CentripetalHead
class TestCentripetalHead(TestCase):
def test_centripetal_head_loss(self):
"""Tests corner head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}]
centripetal_head = CentripetalHead(
num_classes=4, in_channels=1, corner_emb_channels=0)
# Corner head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // 4, s // 4)
for _ in range(centripetal_head.num_feat_levels)
]
forward_outputs = centripetal_head.forward(feat)
# Test that empty ground truth encourages the network
# to predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
gt_bboxes_ignore = None
empty_gt_losses = centripetal_head.loss_by_feat(
*forward_outputs, [gt_instances], img_metas, gt_bboxes_ignore)
empty_det_loss = sum(empty_gt_losses['det_loss'])
empty_guiding_loss = sum(empty_gt_losses['guiding_loss'])
empty_centripetal_loss = sum(empty_gt_losses['centripetal_loss'])
empty_off_loss = sum(empty_gt_losses['off_loss'])
self.assertTrue(empty_det_loss.item() > 0,
'det loss should be non-zero')
self.assertTrue(
empty_guiding_loss.item() == 0,
'there should be no guiding loss when there are no true boxes')
self.assertTrue(
empty_centripetal_loss.item() == 0,
'there should be no centripetal loss when there are no true boxes')
self.assertTrue(
empty_off_loss.item() == 0,
'there should be no box loss when there are no true boxes')
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874],
[123.6667, 123.8757, 138.6326, 251.8874]])
gt_instances.labels = torch.LongTensor([2, 3])
two_gt_losses = centripetal_head.loss_by_feat(*forward_outputs,
[gt_instances],
img_metas,
gt_bboxes_ignore)
twogt_det_loss = sum(two_gt_losses['det_loss'])
twogt_guiding_loss = sum(two_gt_losses['guiding_loss'])
twogt_centripetal_loss = sum(two_gt_losses['centripetal_loss'])
twogt_off_loss = sum(two_gt_losses['off_loss'])
assert twogt_det_loss.item() > 0, 'det loss should be non-zero'
assert twogt_guiding_loss.item() > 0, 'push loss should be non-zero'
assert twogt_centripetal_loss.item(
) > 0, 'pull loss should be non-zero'
assert twogt_off_loss.item() > 0, 'off loss should be non-zero'
|
from urllib.parse import urlparse
from backend.blocks.github._auth import (
GithubCredentials,
GithubFineGrainedAPICredentials,
)
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
api_base = f"https://api.github.com/repos/{owner}/{repo}"
if len(path_parts) > 2:
additional_path = "/".join(path_parts[2:])
api_url = f"{api_base}/{additional_path}"
else:
# Repository base URL
api_url = api_base
else:
raise ValueError("Invalid GitHub URL format.")
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.auth_header(),
"Accept": "application/vnd.github.v3+json",
}
def convert_comment_url_to_api_endpoint(comment_url: str) -> str:
"""
Converts a GitHub comment URL (web interface) to the appropriate API endpoint URL.
Handles:
1. Issue/PR comments: #issuecomment-{id}
2. PR review comments: #discussion_r{id}
Returns the appropriate API endpoint path for the comment.
"""
# First, check if this is already an API URL
parsed_url = urlparse(comment_url)
if parsed_url.hostname == "api.github.com":
return comment_url
# Replace pull with issues for comment endpoints
if "/pull/" in comment_url:
comment_url = comment_url.replace("/pull/", "/issues/")
# Handle issue/PR comments (#issuecomment-xxx)
if "#issuecomment-" in comment_url:
base_url, comment_part = comment_url.split("#issuecomment-")
comment_id = comment_part
# Extract repo information from base URL
parsed_url = urlparse(base_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
# Construct API URL for issue comments
return (
f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{comment_id}"
)
# Handle PR review comments (#discussion_r)
elif "#discussion_r" in comment_url:
base_url, comment_part = comment_url.split("#discussion_r")
comment_id = comment_part
# Extract repo information from base URL
parsed_url = urlparse(base_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
# Construct API URL for PR review comments
return (
f"https://api.github.com/repos/{owner}/{repo}/pulls/comments/{comment_id}"
)
# If no specific comment identifiers are found, use the general URL conversion
return _convert_to_api_url(comment_url)
def get_api(
credentials: GithubCredentials | GithubFineGrainedAPICredentials,
convert_urls: bool = True,
) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url if convert_urls else None,
extra_headers=_get_headers(credentials),
)
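# Hedged usage sketch (illustrative only; "octocat/Hello-World" is a placeholder repo):
if __name__ == "__main__":
    # plain repository/issue URLs map onto the /repos/{owner}/{repo}/... REST path
    print(_convert_to_api_url("https://github.com/octocat/Hello-World/issues/42"))
    # -> https://api.github.com/repos/octocat/Hello-World/issues/42
    # issue-comment anchors map onto the issues/comments endpoint
    print(
        convert_comment_url_to_api_endpoint(
            "https://github.com/octocat/Hello-World/pull/42#issuecomment-123456"
        )
    )
    # -> https://api.github.com/repos/octocat/Hello-World/issues/comments/123456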
|
from urllib.parse import urlparse
from backend.blocks.github._auth import (
GithubCredentials,
GithubFineGrainedAPICredentials,
)
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
api_base = f"https://api.github.com/repos/{owner}/{repo}"
if len(path_parts) > 2:
additional_path = "/".join(path_parts[2:])
api_url = f"{api_base}/{additional_path}"
else:
# Repository base URL
api_url = api_base
else:
raise ValueError("Invalid GitHub URL format.")
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.auth_header(),
"Accept": "application/vnd.github.v3+json",
}
def get_api(
credentials: GithubCredentials | GithubFineGrainedAPICredentials,
convert_urls: bool = True,
) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url if convert_urls else None,
extra_headers=_get_headers(credentials),
)
|
"""Tools for model selection, such as cross validation and hyper-parameter tuning."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import typing
from ._classification_threshold import (
FixedThresholdClassifier,
TunedThresholdClassifierCV,
)
from ._plot import LearningCurveDisplay, ValidationCurveDisplay
from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV
from ._split import (
BaseCrossValidator,
BaseShuffleSplit,
GroupKFold,
GroupShuffleSplit,
KFold,
LeaveOneGroupOut,
LeaveOneOut,
LeavePGroupsOut,
LeavePOut,
PredefinedSplit,
RepeatedKFold,
RepeatedStratifiedKFold,
ShuffleSplit,
StratifiedGroupKFold,
StratifiedKFold,
StratifiedShuffleSplit,
TimeSeriesSplit,
check_cv,
train_test_split,
)
from ._validation import (
cross_val_predict,
cross_val_score,
cross_validate,
learning_curve,
permutation_test_score,
validation_curve,
)
if typing.TYPE_CHECKING:
# Avoid errors in type checkers (e.g. mypy) for experimental estimators.
# TODO: remove this check once the estimator is no longer experimental.
from ._search_successive_halving import ( # noqa
HalvingGridSearchCV,
HalvingRandomSearchCV,
)
__all__ = [
"BaseCrossValidator",
"BaseShuffleSplit",
"FixedThresholdClassifier",
"GridSearchCV",
"GroupKFold",
"GroupShuffleSplit",
"KFold",
"LearningCurveDisplay",
"LeaveOneGroupOut",
"LeaveOneOut",
"LeavePGroupsOut",
"LeavePOut",
"ParameterGrid",
"ParameterSampler",
"PredefinedSplit",
"RandomizedSearchCV",
"RepeatedKFold",
"RepeatedStratifiedKFold",
"ShuffleSplit",
"StratifiedGroupKFold",
"StratifiedKFold",
"StratifiedShuffleSplit",
"TimeSeriesSplit",
"TunedThresholdClassifierCV",
"ValidationCurveDisplay",
"check_cv",
"cross_val_predict",
"cross_val_score",
"cross_validate",
"learning_curve",
"permutation_test_score",
"train_test_split",
"validation_curve",
]
# TODO: remove this check once the estimator is no longer experimental.
def __getattr__(name):
if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}:
raise ImportError(
f"{name} is experimental and the API might change without any "
"deprecation cycle. To use it, you need to explicitly import "
"enable_halving_search_cv:\n"
"from sklearn.experimental import enable_halving_search_cv"
)
raise AttributeError(f"module {__name__} has no attribute {name}")
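# Hedged usage note (not part of this module): the halving estimators above become
# importable only after the experimental flag module has been imported first, e.g.
#
#     from sklearn.experimental import enable_halving_search_cv  # noqa: F401
#     from sklearn.model_selection import HalvingGridSearchCV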
|
"""Tools for model selection, such as cross validation and hyper-parameter tuning."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import typing
from ._classification_threshold import (
FixedThresholdClassifier,
TunedThresholdClassifierCV,
)
from ._plot import LearningCurveDisplay, ValidationCurveDisplay
from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV
from ._split import (
BaseCrossValidator,
BaseShuffleSplit,
GroupKFold,
GroupShuffleSplit,
KFold,
LeaveOneGroupOut,
LeaveOneOut,
LeavePGroupsOut,
LeavePOut,
PredefinedSplit,
RepeatedKFold,
RepeatedStratifiedKFold,
ShuffleSplit,
StratifiedGroupKFold,
StratifiedKFold,
StratifiedShuffleSplit,
TimeSeriesSplit,
check_cv,
train_test_split,
)
from ._validation import (
cross_val_predict,
cross_val_score,
cross_validate,
learning_curve,
permutation_test_score,
validation_curve,
)
if typing.TYPE_CHECKING:
# Avoid errors in type checkers (e.g. mypy) for experimental estimators.
# TODO: remove this check once the estimator is no longer experimental.
from ._search_successive_halving import ( # noqa
HalvingGridSearchCV,
HalvingRandomSearchCV,
)
__all__ = [
"BaseCrossValidator",
"BaseShuffleSplit",
"GridSearchCV",
"TimeSeriesSplit",
"KFold",
"GroupKFold",
"GroupShuffleSplit",
"LeaveOneGroupOut",
"LeaveOneOut",
"LeavePGroupsOut",
"LeavePOut",
"RepeatedKFold",
"RepeatedStratifiedKFold",
"ParameterGrid",
"ParameterSampler",
"PredefinedSplit",
"RandomizedSearchCV",
"ShuffleSplit",
"StratifiedKFold",
"StratifiedGroupKFold",
"StratifiedShuffleSplit",
"FixedThresholdClassifier",
"TunedThresholdClassifierCV",
"check_cv",
"cross_val_predict",
"cross_val_score",
"cross_validate",
"learning_curve",
"LearningCurveDisplay",
"permutation_test_score",
"train_test_split",
"validation_curve",
"ValidationCurveDisplay",
]
# TODO: remove this check once the estimator is no longer experimental.
def __getattr__(name):
if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}:
raise ImportError(
f"{name} is experimental and the API might change without any "
"deprecation cycle. To use it, you need to explicitly import "
"enable_halving_search_cv:\n"
"from sklearn.experimental import enable_halving_search_cv"
)
raise AttributeError(f"module {__name__} has no attribute {name}")
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
WeaviateArrayType = TypeVar(
'WeaviateArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
def _find_similar_vectors(self, query: 'WeaviateArrayType', limit=10):
query = to_numpy_array(query)
is_all_zero = np.all(query == 0)
if is_all_zero:
query = query + EPSILON
query_dict = {'vector': query}
results = (
self._client.query.get(
self._class_name,
['_serialized', '_additional {certainty}', '_additional {id}'],
)
.with_limit(limit)
.with_near_vector(query_dict)
.do()
)
docs = []
# The serialized document is stored in results['data']['Get'][self._class_name]
for result in results.get('data', {}).get('Get', {}).get(self._class_name, []):
doc = Document.from_base64(result['_serialized'], **self._serialize_config)
certainty = result['_additional']['certainty']
doc.scores['weaviate_certainty'] = NamedScore(value=certainty)
if certainty is None:
doc.scores['cosine_similarity'] = NamedScore(value=None)
else:
doc.scores['cosine_similarity'] = NamedScore(value=2 * certainty - 1)
doc.tags['wid'] = result['_additional']['id']
docs.append(doc)
return DocumentArray(docs)
def _find(
self, query: 'WeaviateArrayType', limit: int = 10, **kwargs
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
:param query: input query in any of the types supported for Weaviate storage: np.ndarray, tensorflow.Tensor, torch.Tensor, or Sequence[float]
:param limit: number of retrieved items
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
Note: Weaviate returns `certainty` values. To get cosine similarities one needs to use `cosine_sim = 2*certainty - 1` as explained here:
https://www.semi.technology/developers/weaviate/current/more-resources/faq.html#q-how-do-i-get-the-cosine-similarity-from-weaviates-certainty
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit)
closest_docs.append(da)
return closest_docs
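# Hedged usage sketch (illustrative only; assumes `da` is a Weaviate-backed
# DocumentArray that already holds embedded Documents):
#
#     import numpy as np
#     matches = da._find(np.random.rand(128), limit=5)[0]
#     for m in matches:
#         # Weaviate certainty is mapped to cosine similarity via 2 * certainty - 1
#         print(m.scores['cosine_similarity'].value, m.tags['wid'])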
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
WeaviateArrayType = TypeVar(
'WeaviateArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
def _find_similar_vectors(self, query: 'WeaviateArrayType', limit=10):
query = to_numpy_array(query)
is_all_zero = np.all(query == 0)
if is_all_zero:
query = query + EPSILON
query_dict = {'vector': query}
results = (
self._client.query.get(
self._class_name,
['_serialized', '_additional {certainty}', '_additional {id}'],
)
.with_limit(limit)
.with_near_vector(query_dict)
.do()
)
docs = []
# The serialized document is stored in results['data']['Get'][self._class_name]
for result in results.get('data', {}).get('Get', {}).get(self._class_name, []):
doc = Document.from_base64(result['_serialized'], **self._serialize_config)
certainty = result['_additional']['certainty']
doc.scores['weaviate_certainty'] = NamedScore(value=certainty)
if certainty is None:
doc.scores['cosine_similarity'] = NamedScore(value=None)
else:
doc.scores['cosine_similarity'] = NamedScore(value=2 * certainty - 1)
doc.tags = {
'wid': result['_additional']['id'],
}
docs.append(doc)
return DocumentArray(docs)
def _find(
self, query: 'WeaviateArrayType', limit: int = 10, **kwargs
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
:param query: input query in any of the types supported for Weaviate storage: np.ndarray, tensorflow.Tensor, torch.Tensor, or Sequence[float]
:param limit: number of retrieved items
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
Note: Weaviate returns `certainty` values. To get cosine similarities one needs to use `cosine_sim = 2*certainty - 1` as explained here:
https://www.semi.technology/developers/weaviate/current/more-resources/faq.html#q-how-do-i-get-the-cosine-similarity-from-weaviates-certainty
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit)
closest_docs.append(da)
return closest_docs
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.6.1,<0.7",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic>=2",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
"mistral-common[opencv]": "mistral-common[opencv]>=1.6.3",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.6.1,<0.7",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic>=2",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTensorFlowTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_tensorflow_tensor')
class VideoTensorFlowTensor(
TensorFlowTensor, VideoTensorMixin, metaclass=metaTensorFlow
):
"""
Subclass of [`TensorFlowTensor`][docarray.typing.TensorFlowTensor],
to represent a video tensor. Adds video-specific features to the tensor.
---
```python
from typing import Optional
import tensorflow as tf
from docarray import BaseDoc
from docarray.typing import VideoTensorFlowTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTensorFlowTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=tf.random.normal((100, 224, 224, 3)),
)
# doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = doc_2.url.load().video
# doc_2.video_tensor.save(file_path='file_2.wav')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTensorFlowTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_tensorflow_tensor')
class VideoTensorFlowTensor(
TensorFlowTensor, VideoTensorMixin, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoTensorFlowTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTensorFlowTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=tf.random.normal((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.wav')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoTensorFlowTensor, doc_2.url.load())
doc_2.video_tensor.save(file_path='file_2.wav')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute().parent
dll_path = [curr_path,
curr_path.parents[1],
curr_path / 'compile',
curr_path.parent / 'compile',
curr_path.parents[1] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parent / 'compile' / 'Release')
dll_path.append(curr_path.parent / 'compile' / 'windows' / 'x64' / 'DLL')
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from os import environ
from pathlib import Path
from platform import system
from typing import List
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
if environ.get('LIGHTGBM_BUILD_DOC', False):
# we don't need lib_lightgbm while building docs
return []
curr_path = Path(__file__).absolute().parent
dll_path = [curr_path,
curr_path.parents[1],
curr_path / 'compile',
curr_path.parent / 'compile',
curr_path.parents[1] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parent / 'compile' / 'Release')
dll_path.append(curr_path.parent / 'compile' / 'windows' / 'x64' / 'DLL')
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
|
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
|
import weakref
from keras.src.backend.common import global_state
def _clear_tensor_attr(tensor_id, attr):
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is not None and tensor_id in attr_dict:
del attr_dict[tensor_id]
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, attr, value)
except AttributeError:
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is None:
if value is None:
return
attr_dict = {}
global_state.set_global_attribute(f"{attr}_dict", attr_dict)
if value is not None:
attr_dict[id(tensor)] = value
weakref.finalize(tensor, _clear_tensor_attr, id(tensor), attr)
elif id(tensor) in attr_dict:
del attr_dict[id(tensor)]
def get_tensor_attr(tensor, attr):
if not hasattr(tensor, attr):
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is not None:
return attr_dict.get(id(tensor), None)
else:
return None
return getattr(tensor, attr, None)
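# Hedged usage sketch (illustrative only): some backend tensor types reject ad-hoc
# Python attributes, in which case the value above is kept in a per-attribute side
# dict keyed by id(tensor) and removed via weakref.finalize once the tensor is
# garbage collected. A hypothetical caller might do:
#
#     set_tensor_attr(x, "_keras_mask", mask)   # x: any backend tensor
#     get_tensor_attr(x, "_keras_mask")         # -> mask, or None if never set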
|
import weakref
from keras.src.backend.common import global_state
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, attr, value)
except AttributeError:
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is None:
if value is None:
return
attr_dict = weakref.WeakValueDictionary()
global_state.set_global_attribute(f"{attr}_dict", attr_dict)
if value is not None:
attr_dict[id(tensor)] = value
elif id(tensor) in attr_dict:
del attr_dict[id(tensor)]
def get_tensor_attr(tensor, attr):
if not hasattr(tensor, attr):
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is not None:
return attr_dict.get(id(tensor), None)
else:
return None
return getattr(tensor, attr, None)
|
import asyncio
from typing import AsyncIterator, Iterator, Optional, Union
from jina.helper import get_or_reuse_loop
class _RequestsCounter:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
def count_increment(i: int, rc: _RequestsCounter):
i += 1
rc.count += 1
c_int = 0
c_rc = _RequestsCounter()
count_increment(c_int, c_rc)
assert c_int == 0
assert c_rc.count == 1
"""
count = 0
class AsyncRequestsIterator:
"""Iterator to allow async iteration of blocking/non-blocking iterator from the Client"""
def __init__(
self,
iterator: Union[Iterator, AsyncIterator],
request_counter: Optional[_RequestsCounter] = None,
prefetch: int = 0,
) -> None:
"""Async request iterator
:param iterator: request iterator
:param request_counter: counter of the number of requests being handled at a given moment
:param prefetch: the maximum number of requests to be handled at a given moment (0 disables the feature)
"""
self.iterator = iterator
self._request_counter = request_counter
self._prefetch = prefetch
def iterator__next__(self):
"""
Executed inside a `ThreadPoolExecutor` via `loop.run_in_executor` to avoid following exception.
"StopIteration interacts badly with generators and cannot be raised into a Future"
:return: next request or None
"""
try:
return self.iterator.__next__()
except StopIteration:
return None
def __aiter__(self):
return self
async def __anext__(self):
if isinstance(self.iterator, Iterator):
"""
An `Iterator` indicates "blocking" code, which might block all tasks in the event loop.
Hence we iterate in the default executor provided by asyncio.
"""
request = await get_or_reuse_loop().run_in_executor(
None, self.iterator__next__
)
"""
`iterator.__next__` can be executed directly and that'd raise `StopIteration` in the executor,
which raises the following exception while chaining states in futures.
"StopIteration interacts badly with generators and cannot be raised into a Future"
To avoid that, we handle the raise by a `return None`
"""
if request is None:
raise StopAsyncIteration
elif isinstance(self.iterator, AsyncIterator):
# we assume that `AsyncIterator` doesn't block the event loop
request = await self.iterator.__anext__()
if self._prefetch > 0:
while self._request_counter.count >= self._prefetch:
await asyncio.sleep(0)
return request
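# Hedged usage sketch (illustrative only; `request_generator()` is a hypothetical
# blocking generator of requests):
#
#     counter = _RequestsCounter()
#     async def consume():
#         async for req in AsyncRequestsIterator(
#             request_generator(), request_counter=counter, prefetch=5
#         ):
#             ...  # with prefetch=5, iteration pauses while 5 requests are in flight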
|
import asyncio
from typing import AsyncIterator, Iterator, Optional, Union
from jina.helper import get_or_reuse_loop
class RequestsCounter:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
def count_increment(i: int, rc: RequestsCounter):
i += 1
rc.count += 1
c_int = 0
c_rc = RequestsCounter()
count_increment(c_int, c_rc)
assert c_int == 0
assert c_rc.count == 1
"""
count = 0
class AsyncRequestsIterator:
"""Iterator to allow async iteration of blocking/non-blocking iterator from the Client"""
def __init__(
self,
iterator: Union[Iterator, AsyncIterator],
request_counter: Optional[RequestsCounter] = None,
prefetch: int = 0,
) -> None:
"""Async request iterator
:param iterator: request iterator
:param request_counter: counter of the number of requests being handled at a given moment
:param prefetch: the maximum number of requests to be handled at a given moment (0 disables the feature)
"""
self.iterator = iterator
self._request_counter = request_counter
self._prefetch = prefetch
def iterator__next__(self):
"""
Executed inside a `ThreadPoolExecutor` via `loop.run_in_executor` to avoid following exception.
"StopIteration interacts badly with generators and cannot be raised into a Future"
:return: next request or None
"""
try:
return self.iterator.__next__()
except StopIteration:
return None
def __aiter__(self):
return self
async def __anext__(self):
if isinstance(self.iterator, Iterator):
"""
An `Iterator` indicates "blocking" code, which might block all tasks in the event loop.
Hence we iterate in the default executor provided by asyncio.
"""
request = await get_or_reuse_loop().run_in_executor(
None, self.iterator__next__
)
"""
`iterator.__next__` can be executed directly and that'd raise `StopIteration` in the executor,
which raises the following exception while chaining states in futures.
"StopIteration interacts badly with generators and cannot be raised into a Future"
To avoid that, we handle the raise by a `return None`
"""
if request is None:
raise StopAsyncIteration
elif isinstance(self.iterator, AsyncIterator):
# we assume that `AsyncIterator` doesn't block the event loop
request = await self.iterator.__anext__()
if self._prefetch > 0:
while self._request_counter.count >= self._prefetch:
await asyncio.sleep(0)
return request
|
from typing import Union
import torch
from PIL import Image
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
class MagicMixPipeline(DiffusionPipeline):
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
):
super().__init__()
self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
# convert PIL image to latents
def encode(self, img):
with torch.no_grad():
latent = self.vae.encode(tfms.ToTensor()(img).unsqueeze(0).to(self.device) * 2 - 1)
latent = 0.18215 * latent.latent_dist.sample()
return latent
# convert latents to PIL image
def decode(self, latent):
latent = (1 / 0.18215) * latent
with torch.no_grad():
img = self.vae.decode(latent).sample
img = (img / 2 + 0.5).clamp(0, 1)
img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
img = (img * 255).round().astype("uint8")
return Image.fromarray(img[0])
# convert prompt into text embeddings, also unconditional embeddings
def prep_text(self, prompt):
text_input = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_embedding = self.text_encoder(text_input.input_ids.to(self.device))[0]
uncond_input = self.tokenizer(
"",
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
uncond_embedding = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
return torch.cat([uncond_embedding, text_embedding])
def __call__(
self,
img: Image.Image,
prompt: str,
kmin: float = 0.3,
kmax: float = 0.6,
mix_factor: float = 0.5,
seed: int = 42,
steps: int = 50,
guidance_scale: float = 7.5,
) -> Image.Image:
tmin = steps - int(kmin * steps)
tmax = steps - int(kmax * steps)
text_embeddings = self.prep_text(prompt)
self.scheduler.set_timesteps(steps)
width, height = img.size
encoded = self.encode(img)
torch.manual_seed(seed)
noise = torch.randn(
(1, self.unet.config.in_channels, height // 8, width // 8),
).to(self.device)
latents = self.scheduler.add_noise(
encoded,
noise,
timesteps=self.scheduler.timesteps[tmax],
)
input = torch.cat([latents] * 2)
input = self.scheduler.scale_model_input(input, self.scheduler.timesteps[tmax])
with torch.no_grad():
pred = self.unet(
input,
self.scheduler.timesteps[tmax],
encoder_hidden_states=text_embeddings,
).sample
pred_uncond, pred_text = pred.chunk(2)
pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
latents = self.scheduler.step(pred, self.scheduler.timesteps[tmax], latents).prev_sample
for i, t in enumerate(tqdm(self.scheduler.timesteps)):
if i > tmax:
if i < tmin: # layout generation phase
orig_latents = self.scheduler.add_noise(
encoded,
noise,
timesteps=t,
)
input = (
(mix_factor * latents) + (1 - mix_factor) * orig_latents
) # interpolating between layout noise and conditionally generated noise to preserve layout semantics
input = torch.cat([input] * 2)
else: # content generation phase
input = torch.cat([latents] * 2)
input = self.scheduler.scale_model_input(input, t)
with torch.no_grad():
pred = self.unet(
input,
t,
encoder_hidden_states=text_embeddings,
).sample
pred_uncond, pred_text = pred.chunk(2)
pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
latents = self.scheduler.step(pred, t, latents).prev_sample
return self.decode(latents)
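# Hedged usage sketch (illustrative only; the model id and input image are placeholders,
# and loading via the "magic_mix" community pipeline name is an assumption):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="magic_mix"
#     ).to("cuda")
#     mixed = pipe(Image.open("dog.jpg"), prompt="bed", kmin=0.3, kmax=0.6, mix_factor=0.5)
#     mixed.save("magic_mix.jpg")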
|
from typing import Union
import torch
from PIL import Image
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
class MagicMixPipeline(DiffusionPipeline):
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
):
super().__init__()
self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
# convert PIL image to latents
def encode(self, img):
with torch.no_grad():
latent = self.vae.encode(tfms.ToTensor()(img).unsqueeze(0).to(self.device) * 2 - 1)
latent = 0.18215 * latent.latent_dist.sample()
return latent
# convert latents to PIL image
def decode(self, latent):
latent = (1 / 0.18215) * latent
with torch.no_grad():
img = self.vae.decode(latent).sample
img = (img / 2 + 0.5).clamp(0, 1)
img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
img = (img * 255).round().astype("uint8")
return Image.fromarray(img[0])
# convert prompt into text embeddings, also unconditional embeddings
def prep_text(self, prompt):
text_input = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_embedding = self.text_encoder(text_input.input_ids.to(self.device))[0]
uncond_input = self.tokenizer(
"",
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
uncond_embedding = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
return torch.cat([uncond_embedding, text_embedding])
def __call__(
self,
img: Image.Image,
prompt: str,
kmin: float = 0.3,
kmax: float = 0.6,
mix_factor: float = 0.5,
seed: int = 42,
steps: int = 50,
guidance_scale: float = 7.5,
) -> Image.Image:
tmin = steps - int(kmin * steps)
tmax = steps - int(kmax * steps)
text_embeddings = self.prep_text(prompt)
self.scheduler.set_timesteps(steps)
width, height = img.size
encoded = self.encode(img)
torch.manual_seed(seed)
noise = torch.randn(
(1, self.unet.config.in_channels, height // 8, width // 8),
).to(self.device)
latents = self.scheduler.add_noise(
encoded,
noise,
timesteps=self.scheduler.timesteps[tmax],
)
input = torch.cat([latents] * 2)
input = self.scheduler.scale_model_input(input, self.scheduler.timesteps[tmax])
with torch.no_grad():
pred = self.unet(
input,
self.scheduler.timesteps[tmax],
encoder_hidden_states=text_embeddings,
).sample
pred_uncond, pred_text = pred.chunk(2)
pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
latents = self.scheduler.step(pred, self.scheduler.timesteps[tmax], latents).prev_sample
for i, t in enumerate(tqdm(self.scheduler.timesteps)):
if i > tmax:
if i < tmin: # layout generation phase
orig_latents = self.scheduler.add_noise(
encoded,
noise,
timesteps=t,
)
input = (
(mix_factor * latents) + (1 - mix_factor) * orig_latents
) # interpolating between layout noise and conditionally generated noise to preserve layout semantics
input = torch.cat([input] * 2)
else: # content generation phase
input = torch.cat([latents] * 2)
input = self.scheduler.scale_model_input(input, t)
with torch.no_grad():
pred = self.unet(
input,
t,
encoder_hidden_states=text_embeddings,
).sample
pred_uncond, pred_text = pred.chunk(2)
pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
latents = self.scheduler.step(pred, t, latents).prev_sample
return self.decode(latents)
|
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
)
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
# TODO: Waiting for DetLocalVisualizer to implement
# vis_backends = [dict(type='LocalVisBackend')]
# visualizer = dict(
# type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
visualizer = None
# custom_hooks = [dict(type='DetVisualizationHook', interval=10)]
log_level = 'INFO'
load_from = None
resume = False
# TODO: support auto scaling lr
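# Hedged usage sketch (not part of the original config): in the mmdet 3.x config
# convention, a downstream model config inherits this runtime file via `_base_`
# and overrides individual fields; the relative path below is illustrative.
#
# _base_ = ['../_base_/default_runtime.py']
# default_hooks = dict(logger=dict(type='LoggerHook', interval=10))  # log more often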
|
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
)
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
# custom_hooks = [dict(type='DetVisualizationHook', interval=10)]
log_level = 'INFO'
load_from = None
resume = False
# TODO: support auto scaling lr
|
from typing import Any
from unittest.mock import patch, MagicMock
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.llms import LLMMetadata, CompletionResponse, CompletionResponseGen
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.chat_engine.types import (
AgentChatResponse,
)
from llama_index.agent.introspective.reflective.tool_interactive_reflection import (
Correction,
ToolInteractiveReflectionAgentWorker,
)
PRINT_CHAT_HISTORY = False
mock_correction = Correction(correction="This is a mock correction.")
class MockLLM(CustomLLM):
@property
def metadata(self) -> LLMMetadata:
"""
LLM metadata.
Returns:
LLMMetadata: LLM metadata containing various information about the LLM.
"""
return LLMMetadata()
def structured_predict(
self, output_cls: BaseModel, prompt: PromptTemplate, **prompt_args: Any
) -> BaseModel:
"""This is fixed so that it goes through 2 Reflections and 1 Correction."""
if output_cls == Correction:
return mock_correction
else:
raise ValueError("Unexpected output_cls type for this test.")
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise NotImplementedError
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError
@patch("llama_index.core.agent.function_calling.step.FunctionCallingAgentWorker")
def test_introspective_agent_with_stopping_callable(mock_critique_agent_worker) -> None:
# Arrange
mock_critique_agent = MagicMock()
mock_critique_agent.chat.side_effect = [
AgentChatResponse(response="This is a mock critique str."),
AgentChatResponse(response="This is another mock critique str."),
]
mock_stopping_callable = MagicMock()
mock_stopping_callable.side_effect = [False, True]
dummy_llm = MockLLM()
mock_critique_agent_worker.as_agent.return_value = mock_critique_agent
worker = ToolInteractiveReflectionAgentWorker.from_defaults(
critique_agent_worker=mock_critique_agent_worker,
critique_template="mock critique template",
correction_llm=dummy_llm,
stopping_callable=mock_stopping_callable,
)
# messages that would be sent from the introspective_agent when it delegates
# to the reflection task
messages = [
ChatMessage(content="You are a helpful assistant.", role=MessageRole.SYSTEM),
ChatMessage(content="What's 2+2?", role=MessageRole.USER),
ChatMessage(content="I think it's 5.", role=MessageRole.ASSISTANT),
]
agent = worker.as_agent(chat_history=messages)
# Act
response = agent.chat("I think it's 5.") # reflect on current response
# Assert
if PRINT_CHAT_HISTORY:
for msg in agent.chat_history:
print(str(msg))
print()
assert response.response == "This is a mock correction."
assert (
len(agent.chat_history) == 8
) # (system, user, asst, user, ref, cor, ref, asst)
@patch("llama_index.core.agent.function_calling.step.FunctionCallingAgentWorker")
def test_introspective_agent_with_max_iterations(mock_critique_agent_worker) -> None:
# Arrange
mock_critique_agent = MagicMock()
mock_critique_agent.chat.side_effect = [
AgentChatResponse(response="This is a mock critique str."),
AgentChatResponse(response="This is another mock critique str."),
]
mock_stopping_callable = MagicMock()
mock_stopping_callable.side_effect = [False, True]
dummy_llm = MockLLM()
mock_critique_agent_worker.as_agent.return_value = mock_critique_agent
worker = ToolInteractiveReflectionAgentWorker.from_defaults(
critique_agent_worker=mock_critique_agent_worker,
critique_template="mock critique template",
correction_llm=dummy_llm,
max_iterations=1,
)
# messages that would be sent from the introspective_agent when it delegates
# to the reflection task
messages = [
ChatMessage(content="You are a helpful assistant.", role=MessageRole.SYSTEM),
ChatMessage(content="What's 2+2?", role=MessageRole.USER),
ChatMessage(content="I think it's 5.", role=MessageRole.ASSISTANT),
]
agent = worker.as_agent(chat_history=messages)
# Act
response = agent.chat("I think it's 5.") # reflect on current response
# Assert
if PRINT_CHAT_HISTORY:
for msg in agent.chat_history:
print(str(msg))
print()
assert response.response == "This is a mock correction."
assert len(agent.chat_history) == 6 # (system, user, asst, user, ref, cor/asst)
|
from typing import Any
from unittest.mock import patch, MagicMock
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.llms import LLMMetadata, CompletionResponse, CompletionResponseGen
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.chat_engine.types import (
AgentChatResponse,
)
from llama_index.agent.introspective.reflective.tool_interactive_reflection import (
Correction,
ToolInteractiveReflectionAgentWorker,
)
PRINT_CHAT_HISTORY = False
mock_correction = Correction(correction="This is a mock correction.")
class MockLLM(CustomLLM):
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata.
Returns:
LLMMetadata: LLM metadata containing various information about the LLM.
"""
return LLMMetadata()
def structured_predict(
self, output_cls: BaseModel, prompt: PromptTemplate, **prompt_args: Any
) -> BaseModel:
"""This is fixed so that it goes through 2 Reflections and 1 Correction."""
if output_cls == Correction:
return mock_correction
else:
raise ValueError("Unexpected output_cls type for this test.")
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise NotImplementedError
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError
@patch("llama_index.core.agent.function_calling.step.FunctionCallingAgentWorker")
def test_introspective_agent_with_stopping_callable(mock_critique_agent_worker) -> None:
# Arrange
mock_critique_agent = MagicMock()
mock_critique_agent.chat.side_effect = [
AgentChatResponse(response="This is a mock critique str."),
AgentChatResponse(response="This is another mock critique str."),
]
mock_stopping_callable = MagicMock()
mock_stopping_callable.side_effect = [False, True]
dummy_llm = MockLLM()
mock_critique_agent_worker.as_agent.return_value = mock_critique_agent
worker = ToolInteractiveReflectionAgentWorker.from_defaults(
critique_agent_worker=mock_critique_agent_worker,
critique_template="mock critique template",
correction_llm=dummy_llm,
stopping_callable=mock_stopping_callable,
)
# messages that would be sent from the introspective_agent when it delegates
# to the reflection task
messages = [
ChatMessage(content="You are a helpful assistant.", role=MessageRole.SYSTEM),
ChatMessage(content="What's 2+2?", role=MessageRole.USER),
ChatMessage(content="I think it's 5.", role=MessageRole.ASSISTANT),
]
agent = worker.as_agent(chat_history=messages)
# Act
response = agent.chat("I think it's 5.") # reflect on current response
# Assert
if PRINT_CHAT_HISTORY:
for msg in agent.chat_history:
print(str(msg))
print()
assert response.response == "This is a mock correction."
assert (
len(agent.chat_history) == 8
) # (system, user, asst, user, ref, cor, ref, asst)
@patch("llama_index.core.agent.function_calling.step.FunctionCallingAgentWorker")
def test_introspective_agent_with_max_iterations(mock_critique_agent_worker) -> None:
# Arrange
mock_critique_agent = MagicMock()
mock_critique_agent.chat.side_effect = [
AgentChatResponse(response="This is a mock critique str."),
AgentChatResponse(response="This is another mock critique str."),
]
mock_stopping_callable = MagicMock()
mock_stopping_callable.side_effect = [False, True]
dummy_llm = MockLLM()
mock_critique_agent_worker.as_agent.return_value = mock_critique_agent
worker = ToolInteractiveReflectionAgentWorker.from_defaults(
critique_agent_worker=mock_critique_agent_worker,
critique_template="mock critique template",
correction_llm=dummy_llm,
max_iterations=1,
)
# messages that would be sent from the introspective_agent when it delegates
# to the reflection task
messages = [
ChatMessage(content="You are a helpful assistant.", role=MessageRole.SYSTEM),
ChatMessage(content="What's 2+2?", role=MessageRole.USER),
ChatMessage(content="I think it's 5.", role=MessageRole.ASSISTANT),
]
agent = worker.as_agent(chat_history=messages)
# Act
response = agent.chat("I think it's 5.") # reflect on current response
# Assert
if PRINT_CHAT_HISTORY:
for msg in agent.chat_history:
print(str(msg))
print()
assert response.response == "This is a mock correction."
assert len(agent.chat_history) == 6 # (system, user, asst, user, ref, cor/asst)
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ChatOutputParser(AgentOutputParser):
"""Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
ValueError: If the action could not be found.
"""
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception as exc:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from exc
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "chat"
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ChatOutputParser(AgentOutputParser):
"""Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
ValueError: If the action could not be found.
"""
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
raise ValueError("action not found")
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
raise OutputParserException(
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception as exc:
if not includes_answer:
raise OutputParserException(
f"Could not parse LLM output: {text}"
) from exc
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "chat"
|
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple
if TYPE_CHECKING:
from ... import DocumentArray
from ...typing import AnyDNN, T, ArrayType
import numpy as np
class SingletonSugarMixin:
"""Provide sugary syntax for :class:`Document` by inheriting methods from :class:`DocumentArray`"""
@overload
def match(
self: 'T',
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
only_id: bool = False,
use_scipy: bool = False,
num_worker: Optional[int] = 1,
) -> 'T':
"""Matching the current Document against a set of Documents.
The result will be stored in :attr:`.matches`.
.. note::
When you want to match a set of Documents (let's call it set `A`) against another set of Documents (set `B`),
i.e. for each element in `A` you want to find its nearest neighbours in `B`,
then you need :meth:`DocumentArray.match`
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speed up the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
"""
...
def match(self: 'T', *args, **kwargs) -> 'T':
from ... import DocumentArray
_tmp = DocumentArray(self)
_tmp.match(*args, **kwargs)
return self
@overload
def embed(
self: 'T',
embed_model: 'AnyDNN',
device: str = 'cpu',
batch_size: int = 256,
) -> 'T':
"""Fill the embedding of Documents inplace by using `embed_model`
:param embed_model: the embedding model written in Keras/Pytorch/Paddle
:param device: the computational device for `embed_model`, can be either
`cpu` or `cuda`.
:param batch_size: number of Documents in a batch for embedding
"""
def embed(self: 'T', *args, **kwargs) -> 'T':
from ... import DocumentArray
_tmp = DocumentArray(self)
_tmp.embed(*args, **kwargs)
return self
def post(self: 'T', *args, **kwargs) -> 'T':
from ... import DocumentArray
_tmp = DocumentArray(self)
return _tmp.post(*args, **kwargs)[0]
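# Hedged usage sketch (not part of the original module). Assumption: this mixin is
# mixed into docarray's `Document`, so the import path below is illustrative.
if __name__ == "__main__":
    import numpy as np
    from docarray import Document, DocumentArray  # assumed public entry points

    query = Document(embedding=np.random.rand(128))
    index = DocumentArray([Document(embedding=np.random.rand(128)) for _ in range(100)])

    # single-Document sugar: wraps `query` in a DocumentArray, matches, fills .matches
    query.match(index, metric='cosine', limit=5)
    print(len(query.matches))  # 5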
|
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple
if TYPE_CHECKING:
from ... import DocumentArray
from ...typing import AnyDNN, T, ArrayType
import numpy as np
class SingletonSugarMixin:
"""Provide sugary syntax for :class:`Document` by inheriting methods from :class:`DocumentArray`"""
@overload
def match(
self: 'T',
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
only_id: bool = False,
use_scipy: bool = False,
num_worker: Optional[int] = 1,
) -> 'T':
"""Matching the current Document against a set of Documents.
The result will be stored in :attr:`.matches`.
.. note::
When you want to match a set of Documents (let's call it set `A`) against another set of Documents (set `B`),
i.e. for each element in `A` you want to find its nearest neighbours in `B`,
then you need :meth:`DocumentArray.match`
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speed up the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
"""
...
def match(self: 'T', *args, **kwargs) -> 'T':
from ... import DocumentArray
_tmp = DocumentArray(self)
_tmp.match(*args, **kwargs)
return self
@overload
def embed(
self: 'T',
embed_model: 'AnyDNN',
device: str = 'cpu',
batch_size: int = 256,
) -> 'T':
"""Fill the embedding of Documents inplace by using `embed_model`
:param embed_model: the embedding model written in Keras/Pytorch/Paddle
:param device: the computational device for `embed_model`, can be either
`cpu` or `cuda`.
:param batch_size: number of Documents in a batch for embedding
"""
def embed(self: 'T', *args, **kwargs) -> 'T':
from ... import DocumentArray
_tmp = DocumentArray(self)
_tmp.embed(*args, **kwargs)
return self
|
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As the loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
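# Hedged follow-up sketch (not part of the original script): after training, the
# saved model can be used directly for sentence encoding and similarity scoring.
sentences = ["A man is playing a guitar.", "Someone plays an instrument."]
embeddings = model.encode(sentences)  # shape: (2, hidden_dim)
print(util.cos_sim(embeddings[0], embeddings[1]).item())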
|
import torch
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As the loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
_base_ = '../ssd/ssd512_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
_base_ = '../ssd/ssd512_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.neo4j import Neo4jGraphStore
def test_neo4j_graph_store():
names_of_bases = [b.__name__ for b in Neo4jGraphStore.__bases__]
assert GraphStore.__name__ in names_of_bases
|
from unittest.mock import MagicMock, patch
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.neo4j import Neo4jGraphStore
@patch("llama_index.graph_stores.neo4j.Neo4jGraphStore")
def test_neo4j_graph_store(MockNeo4jGraphStore: MagicMock):
instance: Neo4jGraphStore = MockNeo4jGraphStore.return_value()
assert isinstance(instance, GraphStore)
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_size, is_simple_tensor
def query_bounding_boxes(flat_inputs: List[Any]) -> datapoints.BoundingBoxes:
bounding_boxes = [inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBoxes)]
if not bounding_boxes:
raise TypeError("No bounding boxes were found in the sample")
elif len(bounding_boxes) > 1:
raise ValueError("Found multiple bounding boxes instances in the sample")
return bounding_boxes.pop()
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if check_type(inpt, (is_simple_tensor, datapoints.Image, PIL.Image.Image, datapoints.Video))
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_size(inpt))
for inpt in flat_inputs
if check_type(
inpt,
(
is_simple_tensor,
datapoints.Image,
PIL.Image.Image,
datapoints.Video,
datapoints.Mask,
datapoints.BoundingBoxes,
),
)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
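# Hedged usage sketch (not part of the original module): the query/check helpers
# above operate on a flat list of sample components.
if __name__ == "__main__":
    import torch

    image = datapoints.Image(torch.rand(3, 32, 48))
    flat_inputs = [image, torch.rand(3, 32, 48), "some metadata"]

    print(query_chw(flat_inputs))   # (3, 32, 48)
    print(query_size(flat_inputs))  # (32, 48)
    print(has_any(flat_inputs, datapoints.Image, datapoints.Video))  # True
    print(has_all(flat_inputs, datapoints.Image, datapoints.Video))  # False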
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_spatial_size, is_simple_tensor
def query_bounding_boxes(flat_inputs: List[Any]) -> datapoints.BoundingBoxes:
bounding_boxes = [inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBoxes)]
if not bounding_boxes:
raise TypeError("No bounding box was found in the sample")
elif len(bounding_boxes) > 1:
raise ValueError("Found multiple bounding boxes in the sample")
return bounding_boxes.pop()
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if isinstance(inpt, (datapoints.Image, PIL.Image.Image, datapoints.Video)) or is_simple_tensor(inpt)
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_spatial_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_spatial_size(inpt))
for inpt in flat_inputs
if isinstance(
inpt, (datapoints.Image, PIL.Image.Image, datapoints.Video, datapoints.Mask, datapoints.BoundingBoxes)
)
or is_simple_tensor(inpt)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
"""Question-answering with sources over an index."""
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over an index."""
retriever: BaseRetriever = Field(exclude=True)
"""Index to connect to."""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> list[Document]:
question = inputs[self.question_key]
docs = self.retriever.invoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self, inputs: dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
) -> list[Document]:
question = inputs[self.question_key]
docs = await self.retriever.ainvoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa_with_sources_chain"
|
"""Question-answering with sources over an index."""
from typing import Any, Dict, List
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over an index."""
retriever: BaseRetriever = Field(exclude=True)
"""Index to connect to."""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> List[Document]:
question = inputs[self.question_key]
docs = self.retriever.invoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self, inputs: Dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
) -> List[Document]:
question = inputs[self.question_key]
docs = await self.retriever.ainvoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa_with_sources_chain"
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_openapi_agent": "langchain_community.agent_toolkits.openapi.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_openapi_agent",
]
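# Hedged usage note (not part of the original module): importing the deprecated
# name from this package resolves it lazily through __getattr__ above, emits a
# deprecation warning, and forwards to the langchain_community implementation.
# The package path below is illustrative.
#
# from langchain.agents.agent_toolkits.openapi import create_openapi_agent  # warns, then works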
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_openapi_agent": "langchain_community.agent_toolkits.openapi.base"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_openapi_agent",
]
|
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CERerankingEvaluator import CERerankingEvaluator
from .CESoftmaxAccuracyEvaluator import CESoftmaxAccuracyEvaluator
__all__ = [
"CEBinaryAccuracyEvaluator",
"CEBinaryClassificationEvaluator",
"CECorrelationEvaluator",
"CEF1Evaluator",
"CESoftmaxAccuracyEvaluator",
"CERerankingEvaluator",
]
|
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CESoftmaxAccuracyEvaluator import CESoftmaxAccuracyEvaluator
from .CERerankingEvaluator import CERerankingEvaluator
__all__ = [
"CEBinaryAccuracyEvaluator",
"CEBinaryClassificationEvaluator",
"CECorrelationEvaluator",
"CEF1Evaluator",
"CESoftmaxAccuracyEvaluator",
"CERerankingEvaluator",
]
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.data import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
sem_seg = np.zeros((h, w), dtype=np.int64) + 2
bboxes = _rand_bboxes(num_boxes, h, w).int()
labels = torch.randint(2, (num_boxes, ))
for i in range(num_boxes):
x, y, w, h = bboxes[i]
sem_seg[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + labels[i]
return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample('image', image, gt_det_data_sample)
# test out_file
det_local_visualizer.add_datasample(
'image', image, gt_det_data_sample, out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
pred_instances.scores = torch.rand((num_bboxes, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_gt=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# test gt_panoptic_seg and pred_panoptic_seg
det_local_visualizer.dataset_meta = dict(CLASSES=('1', '2'))
gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=gt_sem_seg)
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_panoptic_seg = panoptic_seg
pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=pred_sem_seg)
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_panoptic_seg = panoptic_seg
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
# class information must be provided
det_local_visualizer.dataset_meta = {}
with self.assertRaises(AssertionError):
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.data import InstanceData
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (5, ))
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample('image', image, gt_det_data_sample)
# test out_file
det_local_visualizer.add_datasample(
'image', image, gt_det_data_sample, out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_gt=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# TODO: test gt_panoptic_seg
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
from langchain_core.agents import AgentAction
def format_xml(
intermediate_steps: list[tuple[AgentAction, str]],
) -> str:
"""Format the intermediate steps as XML.
Args:
intermediate_steps: The intermediate steps.
Returns:
The intermediate steps as XML.
"""
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
return log
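# Hedged usage sketch (not part of the original module): formatting a single
# intermediate agent step into the XML scratchpad produced by this helper.
if __name__ == "__main__":
    steps = [
        (
            AgentAction(tool="search", tool_input="weather in SF", log="Searching..."),
            "It is 64 degrees and sunny.",
        )
    ]
    print(format_xml(steps))
    # <tool>search</tool><tool_input>weather in SF</tool_input><observation>It is 64 degrees and sunny.</observation>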
|
from typing import List, Tuple
from langchain_core.agents import AgentAction
def format_xml(
intermediate_steps: List[Tuple[AgentAction, str]],
) -> str:
"""Format the intermediate steps as XML.
Args:
intermediate_steps: The intermediate steps.
Returns:
The intermediate steps as XML.
"""
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
return log
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset',
'get_loading_pipeline', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset',
'get_loading_pipeline', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset'
]
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fn: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the Cross Entropy Loss for a CrossEncoder model. This loss is used to train a model to predict the
correct class label for a given pair of sentences. The number of classes should be equal to the number of model
output labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fn (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss. Defaults to :class:`~torch.nn.Identity`.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.CrossEntropyLoss`.
References:
- :class:`torch.nn.CrossEntropyLoss`
- `Cross Encoder > Training Examples > Natural Language Inference <../../../examples/cross_encoder/training/nli/README.html>`_
Requirements:
1. Your model can be initialized with `num_labels > 1` to predict multiple classes.
2. The number of dataset classes should be equal to the number of model output labels (`model.num_labels`).
Inputs:
+-------------------------------------------------+--------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=================================================+========+===============================+
| (sentence_A, sentence_B) pairs | class | `num_classes` |
+-------------------------------------------------+--------+-------------------------------+
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
model = CrossEncoder("microsoft/mpnet-base", num_labels=2)
train_dataset = Dataset.from_dict({
"sentence1": ["How can I be a good geologist?", "What is the capital of France?"],
"sentence2": ["What should I do to be a great geologist?", "What is the capital of Germany?"],
"label": [1, 0], # 1: duplicate, 0: not duplicate
})
loss = losses.CrossEntropyLoss(model)
trainer = CrossEncoderTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fn = activation_fn
self.ce_loss = nn.CrossEntropyLoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
logits = self.activation_fn(logits)
loss = self.ce_loss(logits, labels)
return loss
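# A minimal usage sketch, not part of the library itself: it assumes the same
# "microsoft/mpnet-base" checkpoint as the docstring example and only illustrates how the
# extra **kwargs are forwarded to the underlying torch.nn.CrossEntropyLoss (e.g. class
# weights or label smoothing). The 3-class setup and weights below are illustrative assumptions.
if __name__ == "__main__":
    import torch

    example_model = CrossEncoder("microsoft/mpnet-base", num_labels=3)
    example_loss = CrossEntropyLoss(
        example_model,
        weight=torch.tensor([1.0, 2.0, 0.5]),  # forwarded to nn.CrossEntropyLoss
        label_smoothing=0.1,                   # forwarded to nn.CrossEntropyLoss
    )
    print(example_loss.ce_loss)  # CrossEntropyLoss(weight=..., label_smoothing=0.1)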
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the Cross Entropy Loss for a CrossEncoder model. This loss is used to train a model to predict the
correct class label for a given pair of sentences. The number of classes should be equal to the number of model
output labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss. Defaults to :class:`~torch.nn.Identity`.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.CrossEntropyLoss`.
References:
- :class:`torch.nn.CrossEntropyLoss`
- `Cross Encoder > Training Examples > Natural Language Inference <../../../examples/cross_encoder/training/nli/README.html>`_
Requirements:
1. Your model can be initialized with `num_labels > 1` to predict multiple classes.
2. The number of dataset classes should be equal to the number of model output labels (`model.num_labels`).
Inputs:
+-------------------------------------------------+--------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=================================================+========+===============================+
| (sentence_A, sentence_B) pairs | class | `num_classes` |
+-------------------------------------------------+--------+-------------------------------+
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
model = CrossEncoder("microsoft/mpnet-base", num_labels=2)
train_dataset = Dataset.from_dict({
"sentence1": ["How can I be a good geologist?", "What is the capital of France?"],
"sentence2": ["What should I do to be a great geologist?", "What is the capital of Germany?"],
"label": [1, 0], # 1: duplicate, 0: not duplicate
})
loss = losses.CrossEntropyLoss(model)
trainer = CrossEncoderTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.ce_loss = nn.CrossEntropyLoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
logits = self.activation_fct(logits)
loss = self.ce_loss(logits, labels)
return loss
|
from jina.clients.base.grpc import GRPCBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
HealthCheckMixin,
PostMixin,
ProfileMixin,
)
class GRPCClient(GRPCBaseClient, PostMixin, HealthCheckMixin, ProfileMixin):
"""A client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='grpc', asyncio=False, host='grpc://my.awesome.flow:1234'
) # returns GRPCClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncGRPCClient(GRPCBaseClient, AsyncPostMixin, AsyncHealthCheckMixin):
"""
Asynchronous client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
Unlike :class:`GRPCClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
simply calling it will not schedule it for execution.
To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
``asyncio.create_task()``.
:class:`AsyncGRPCClient` can be very useful in
integration settings, where Jina/Flow/Client is NOT the main logic but rather serves as a part of another program.
In this case, users often do **NOT** want to let Jina control the ``asyncio`` event loop. By contrast, :class:`Client`
controls and wraps the event loop internally, making the Client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
import asyncio
from jina import Client
from docarray import Document
# async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='grpc', asyncio=True, host='grpc://my.awesome.flow:1234'
) # returns AsyncGRPCClient instance
async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
from jina.clients.base.grpc import GRPCBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
HealthCheckMixin,
PostMixin,
)
class GRPCClient(GRPCBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='grpc', asyncio=False, host='grpc://my.awesome.flow:1234'
) # returns GRPCClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncGRPCClient(GRPCBaseClient, AsyncPostMixin, AsyncHealthCheckMixin):
"""
Asynchronous client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
Unlike :class:`GRPCClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
simply calling it will not schedule it for execution.
To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
``asyncio.create_task()``.
:class:`AsyncGRPCClient` can be very useful in
integration settings, where Jina/Flow/Client is NOT the main logic but rather serves as a part of another program.
In this case, users often do **NOT** want to let Jina control the ``asyncio`` event loop. By contrast, :class:`Client`
controls and wraps the event loop internally, making the Client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
import asyncio
from jina import Client
from docarray import Document
# async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='grpc', asyncio=True, host='grpc://my.awesome.flow:1234'
) # returns AsyncGRPCClient instance
async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp import autocast
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .runner import Runner
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner', 'find_latest_checkpoint', 'autocast'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .runner import Runner
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner', 'find_latest_checkpoint'
]
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_extmem_qdm(
n_samples_per_batch: int, n_features: int, n_batches: int, on_host: bool
) -> None:
check_extmem_qdm(n_samples_per_batch, n_features, n_batches, "cuda", on_host)
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
def test_quantile_objective() -> None:
with pytest.raises(ValueError, match="external memory"):
check_quantile_loss_extmem(2, 2, 2, "hist", "cuda")
|
from typing import TYPE_CHECKING, Optional, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUrl')
class AnyUrl(BaseAnyUrl, AbstractType):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf.
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(any_url=str(self))
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file
path without prefix.
If `scheme` is missing, we assume it is a local file path.
"""
scheme = parts['scheme']
if scheme is None:
# allow missing scheme, unlike pydantic
pass
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**_kwargs: str,
) -> str:
"""
Build a URL from its parts.
The only difference from the pydantic implementation is that we allow
missing `scheme`, making it possible to pass a file path without prefix.
"""
# allow missing scheme, unlike pydantic
scheme_ = scheme if scheme is not None else ''
url = super().build(
scheme=scheme_,
user=user,
password=password,
host=host,
port=port,
path=path,
query=query,
fragment=fragment,
**_kwargs,
)
if scheme is None and url.startswith('://'):
# remove the `://` prefix, since scheme is missing
url = url[3:]
return url
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
Read a URL from a protobuf message.
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
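# A minimal usage sketch (illustrative, assuming pydantic v1-style parse_obj_as as imported
# above): both a remote URL and a plain local path validate, since validate_parts/build above
# allow a missing scheme. The concrete paths are assumptions for demonstration only.
if __name__ == '__main__':
    remote_url = parse_obj_as(AnyUrl, 'https://example.com/data/file.png')
    local_path = parse_obj_as(AnyUrl, 'data/file.png')  # no scheme: accepted as a local path
    print(remote_url, local_path)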
|
from typing import TYPE_CHECKING, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUrl')
class AnyUrl(BaseAnyUrl, AbstractType):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf.
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(any_url=str(self))
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file path.
"""
scheme = parts['scheme']
if scheme is None:
pass # allow missing scheme, unlike pydantic
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
Read a URL from a protobuf message.
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
|
"""
Use scikit-learn regressor interface with GPU histogram tree method
===================================================================
"""
import dask
from dask import array as da
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
def main(client: Client) -> dxgb.Booster:
# Generate some random data for demonstration
rng = da.random.default_rng(1)
m = 2**18
n = 100
X = rng.uniform(size=(m, n), chunks=(128**2, -1))
y = X.sum(axis=1)
regressor = dxgb.DaskXGBRegressor(verbosity=1)
# Set the device to CUDA
regressor.set_params(tree_method="hist", device="cuda")
# Assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print("Evaluation history:", history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == "__main__":
# With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
# `LocalCUDACluster` used here is only for demonstration purposes.
with LocalCUDACluster() as cluster:
# Create client from cluster, set the backend to GPU array (cupy).
with Client(cluster) as client, dask.config.set({"array.backend": "cupy"}):
main(client)
|
"""
Use scikit-learn regressor interface with GPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
def main(client: Client) -> dxgb.Booster:
# generate some random data for demonstration
n = 100
m = 1000000
partition_size = 10000
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = dxgb.DaskXGBRegressor(verbosity=1)
# set the device to CUDA
regressor.set_params(tree_method="hist", device="cuda")
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print("Evaluation history:", history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == "__main__":
# With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
# `LocalCUDACluster` used here is only for demonstration purposes.
with LocalCUDACluster() as cluster:
with Client(cluster) as client:
main(client)
|
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutMix, RandomMixUp, SimpleCopyPaste
from ._geometry import FixedSizeCrop
from ._misc import PermuteDimensions, TransposeDimensions
from ._type_conversion import LabelToOneHot
|
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomMixup, SimpleCopyPaste
from ._geometry import FixedSizeCrop
from ._misc import PermuteDimensions, TransposeDimensions
from ._type_conversion import LabelToOneHot
|
import importlib
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="client",
params=[
"tutorial003",
pytest.param("tutorial003_py310", marks=needs_py310),
"tutorial003_an",
pytest.param("tutorial003_an_py39", marks=needs_py39),
pytest.param("tutorial003_an_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.header_params.{request.param}")
client = TestClient(mod.app)
return client
@pytest.mark.parametrize(
"path,headers,expected_status,expected_response",
[
("/items", None, 200, {"X-Token values": None}),
("/items", {"x-token": "foo"}, 200, {"X-Token values": ["foo"]}),
# TODO: fix this, is it a bug?
# ("/items", [("x-token", "foo"), ("x-token", "bar")], 200, {"X-Token values": ["foo", "bar"]}),
],
)
def test(path, headers, expected_status, expected_response, client: TestClient):
response = client.get(path, headers=headers)
assert response.status_code == expected_status
assert response.json() == expected_response
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": IsDict(
{
"title": "X-Token",
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "null"},
],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{
"title": "X-Token",
"type": "array",
"items": {"type": "string"},
}
),
"name": "x-token",
"in": "header",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
|
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from docs_src.header_params.tutorial003 import app
client = TestClient(app)
@pytest.mark.parametrize(
"path,headers,expected_status,expected_response",
[
("/items", None, 200, {"X-Token values": None}),
("/items", {"x-token": "foo"}, 200, {"X-Token values": ["foo"]}),
(
"/items",
[("x-token", "foo"), ("x-token", "bar")],
200,
{"X-Token values": ["foo", "bar"]},
),
],
)
def test(path, headers, expected_status, expected_response):
response = client.get(path, headers=headers)
assert response.status_code == expected_status
assert response.json() == expected_response
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": IsDict(
{
"title": "X-Token",
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "null"},
],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{
"title": "X-Token",
"type": "array",
"items": {"type": "string"},
}
),
"name": "x-token",
"in": "header",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
|
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.fixture(scope='session')
def basic_encoder() -> FlairTextEncoder:
return FlairTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_no_document(basic_encoder: FlairTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: FlairTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: FlairTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_embeddings_str_error():
with pytest.raises(ValueError, match='embeddings'):
FlairTextEncoder(embeddings='word:glove')
def test_pooling_strategy_error():
with pytest.raises(ValueError, match='pooling_strategy'):
FlairTextEncoder(pooling_strategy='wrong')
def test_unknown_model_error():
with pytest.raises(ValueError, match='The model name wrong'):
FlairTextEncoder(embeddings=['wrong:glove'])
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'embeddings, dim',
(
(['flair:news-forward'], 2048),
(['flair:news-forward', 'flair:news-backward'], 4096),
(['word:glove', 'flair:news-backward'], 2148),
(['byte-pair:en'], 100),
),
)
def test_encoding_models(embeddings: List[str], dim: int):
docs = DocumentArray([Document(text='hello there')])
encoder = FlairTextEncoder(embeddings=embeddings)
encoder.encode(docs, {})
assert docs[0].embedding.shape == (dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: FlairTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: FlairTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: FlairTextEncoder):
docs = DocumentArray(
[
Document(id='A', text='a furry animal that with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from pathlib import Path
import numpy as np
import pytest
from jina import DocumentArray, Document, Executor
from ...flair_text import FlairTextEncoder
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_flair_batch(docs_generator):
encoder = FlairTextEncoder(pooling_strategy='mean')
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (100,)
def test_traversal_path():
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
encoder = FlairTextEncoder()
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['c']})
for path, count in [[['r'], 0], [['c'], 3], [['cc'], 0]]:
assert len(docs.traverse_flat(path).get_attributes('embedding')) == count
if count > 0:
assert docs.traverse_flat(path).get_attributes('embedding')[0].shape == (
100,
)
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['cc']})
for path, count in [[['r'], 0], [['c'], 3], [['cc'], 2]]:
assert len(docs.traverse_flat(path).get_attributes('embedding')) == count
if count > 0:
assert docs.traverse_flat(path).get_attributes('embedding')[0].shape == (
100,
)
def test_no_documents():
encoder = FlairTextEncoder()
docs = []
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert not docs
def test_flair_word_encode():
docs = []
words = ['apple', 'banana1', 'banana2', 'studio', 'satelite', 'airplane']
for word in words:
docs.append(Document(text=word))
text_encoder = FlairTextEncoder()
text_encoder.encode(DocumentArray(docs), {})
txt_to_ndarray = {}
for d in docs:
txt_to_ndarray[d.text] = d.embedding
def dist(a, b):
nonlocal txt_to_ndarray
a_embedding = txt_to_ndarray[a]
b_embedding = txt_to_ndarray[b]
return np.linalg.norm(a_embedding - b_embedding)
# assert semantic meaning is captured in the encoding
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satelite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
|
from __future__ import annotations
import os
import tempfile
def is_ci() -> bool:
"""
Check if the code is running in a Continuous Integration (CI) environment.
This is determined by checking for the presence of the ``GITHUB_ACTIONS`` environment variable.
"""
return "GITHUB_ACTIONS" in os.environ
class SafeTemporaryDirectory(tempfile.TemporaryDirectory):
"""
The GitHub Actions CI on Windows sometimes raises a NotADirectoryError when cleaning up the temporary directory.
This class is a workaround to avoid the error.
Unlike tempfile.TemporaryDirectory(ignore_cleanup_errors=True), this also works on Python 3.9.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["ignore_cleanup_errors"] = True
try:
super().__init__(*args, **kwargs)
except TypeError:
del kwargs["ignore_cleanup_errors"]
super().__init__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
try:
super().__exit__(*args, **kwargs)
except NotADirectoryError:
pass
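# A minimal usage sketch (illustrative only): the directory is removed on exit, and the
# Windows-specific NotADirectoryError raised during cleanup is swallowed by __exit__ above.
if __name__ == "__main__":
    with SafeTemporaryDirectory() as tmp_dir:
        print("working in", tmp_dir)
    # on exit the directory is gone; cleanup errors on Windows CI are ignored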
|
from __future__ import annotations
import tempfile
class SafeTemporaryDirectory(tempfile.TemporaryDirectory):
"""
The GitHub Actions CI on Windows sometimes raises a NotADirectoryError when cleaning up the temporary directory.
This class is a workaround to avoid the error.
Unlike tempfile.TemporaryDirectory(ignore_cleanup_errors=True), this also works on Python 3.9.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["ignore_cleanup_errors"] = True
try:
super().__init__(*args, **kwargs)
except TypeError:
del kwargs["ignore_cleanup_errors"]
super().__init__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
try:
super().__exit__(*args, **kwargs)
except NotADirectoryError:
pass
|
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
TOOLS:
------
Assistant has access to the following tools:""" # noqa: E501
FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```""" # noqa: E501
SUFFIX = """Begin!
Previous conversation history:
{chat_history}
New input: {input}
{agent_scratchpad}"""
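# A minimal sketch of which placeholders each template expects. This is not how the agent
# actually assembles its prompt (that is done through the framework's prompt templates);
# the tool names and ai_prefix below are illustrative assumptions.
if __name__ == "__main__":
    print(PREFIX)
    print(FORMAT_INSTRUCTIONS.format(tool_names="Search, Calculator", ai_prefix="AI"))
    print(SUFFIX.format(chat_history="", input="What is 2 + 2?", agent_scratchpad=""))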
|
# flake8: noqa
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
TOOLS:
------
Assistant has access to the following tools:"""
FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```"""
SUFFIX = """Begin!
Previous conversation history:
{chat_history}
New input: {input}
{agent_scratchpad}"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataSample
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.writer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = [
BaseDataSample(
metainfo=dict(
scale=(15, 15),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = [
BaseDataSample(
metainfo=dict(ori_height=15, ori_width=15,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataSample
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
Runner = Mock(iter=1)
Runner.writer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
# test with normalize, resize, pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(Runner, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(Runner, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = [
BaseDataSample(
metainfo=dict(
scale=(15, 15),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(Runner, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(Runner, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = [
BaseDataSample(
metainfo=dict(ori_height=15, ori_width=15,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(Runner, data_batch,
pred_datasamples)
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
FalCredentials = APIKeyCredentials
FalCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.FAL],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="fal",
api_key=SecretStr("mock-fal-api-key"),
title="Mock FAL API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
def FalCredentialsField() -> FalCredentialsInput:
"""
Creates a FAL credentials input on a block.
"""
return CredentialsField(
description="The FAL integration can be used with an API Key.",
)
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
FalCredentials = APIKeyCredentials
FalCredentialsInput = CredentialsMetaInput[
Literal["fal"],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="fal",
api_key=SecretStr("mock-fal-api-key"),
title="Mock FAL API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
def FalCredentialsField() -> FalCredentialsInput:
"""
Creates a FAL credentials input on a block.
"""
return CredentialsField(
provider="fal",
supported_credential_types={"api_key"},
description="The FAL integration can be used with an API Key.",
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import subprocess
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# When executing `import mmengine.runner`,
# pkg_resources will be imported and it takes too much time.
# Therefore, import it in function scope to save time.
import pkg_resources
from pkg_resources import get_distribution
# refresh the pkg_resources
# more details at https://github.com/pypa/setuptools/issues/373
importlib.reload(pkg_resources)
try:
get_distribution(package)
return True
except pkg_resources.DistributionNotFound:
return False
def get_installed_path(package: str) -> str:
"""Get installed path of package.
Args:
package (str): Name of package.
Example:
>>> get_installed_path('mmcls')
>>> '.../lib/python3.7/site-packages/mmcls'
"""
from pkg_resources import get_distribution
# If the package name is not the same as the module name, the module name should be
# inferred. For example, mmcv-full is the package name, but mmcv is the module
# name. To get the installed path of mmcv-full, we concatenate
# pkg.location and the module name.
pkg = get_distribution(package)
possible_path = osp.join(pkg.location, package)
if osp.exists(possible_path):
return possible_path
else:
return osp.join(pkg.location, package2module(package))
def package2module(package: str):
"""Infer module name from package.
Args:
package (str): Package to infer module name.
"""
from pkg_resources import get_distribution
pkg = get_distribution(package)
if pkg.has_metadata('top_level.txt'):
module_name = pkg.get_metadata('top_level.txt').split('\n')[0]
return module_name
else:
raise ValueError(f'cannot infer the module name of {package}')
def call_command(cmd: list) -> None:
try:
subprocess.check_call(cmd)
except Exception as e:
raise e # type: ignore
def install_package(package: str):
if not is_installed(package):
call_command(['python', '-m', 'pip', 'install', package])
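# A minimal usage sketch (illustrative; 'numpy' is just an assumed example package):
if __name__ == '__main__':
    if is_installed('numpy'):
        # for most packages the module name equals the package name, so this prints
        # something like '.../site-packages/numpy'
        print(get_installed_path('numpy'))
    else:
        install_package('numpy')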
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import subprocess
import pkg_resources
from pkg_resources import get_distribution
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# refresh the pkg_resources
# more details at https://github.com/pypa/setuptools/issues/373
importlib.reload(pkg_resources)
try:
get_distribution(package)
return True
except pkg_resources.DistributionNotFound:
return False
def get_installed_path(package: str) -> str:
"""Get installed path of package.
Args:
package (str): Name of package.
Example:
>>> get_installed_path('mmcls')
>>> '.../lib/python3.7/site-packages/mmcls'
"""
# If the package name is not the same as the module name, the module name should be
# inferred. For example, mmcv-full is the package name, but mmcv is the module
# name. To get the installed path of mmcv-full, we concatenate
# pkg.location and the module name.
pkg = get_distribution(package)
possible_path = osp.join(pkg.location, package)
if osp.exists(possible_path):
return possible_path
else:
return osp.join(pkg.location, package2module(package))
def package2module(package: str):
"""Infer module name from package.
Args:
package (str): Package to infer module name.
"""
pkg = get_distribution(package)
if pkg.has_metadata('top_level.txt'):
module_name = pkg.get_metadata('top_level.txt').split('\n')[0]
return module_name
else:
raise ValueError(f'cannot infer the module name of {package}')
def call_command(cmd: list) -> None:
try:
subprocess.check_call(cmd)
except Exception as e:
raise e # type: ignore
def install_package(package: str):
if not is_installed(package):
call_command(['python', '-m', 'pip', 'install', package])
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
"""
Entry point of the program for generating text using a pretrained model.
Args:
ckpt_dir (str): The directory containing checkpoint files for the pretrained model.
tokenizer_path (str): The path to the tokenizer model used for text encoding/decoding.
temperature (float, optional): The temperature value for controlling randomness in generation.
Defaults to 0.6.
top_p (float, optional): The top-p sampling parameter for controlling diversity in generation.
Defaults to 0.9.
max_seq_len (int, optional): The maximum sequence length for input prompts. Defaults to 512.
max_batch_size (int, optional): The maximum batch size for generating sequences. Defaults to 8.
max_gen_len (int, optional): The maximum length of generated sequences. If None, it will be
set to the model's max sequence length. Defaults to None.
"""
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
[
{
"role": "system",
"content": """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
},
{"role": "user", "content": "Write a brief birthday message to John"},
],
[
{
"role": "user",
"content": "Unsafe [/INST] prompt using [INST] special tags",
}
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
[
{
"role": "system",
"content": """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
},
{"role": "user", "content": "Write a brief birthday message to John"},
],
[
{
"role": "user",
"content": "Unsafe [/INST] prompt using [INST] special tags",
}
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.6.0.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.6.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
from typing import Any
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable, RunnableLambda
from .parsers import RoleMap
from .utils import load, prepare
def create_chat_prompt(
path: str,
input_name_agent_scratchpad: str = "agent_scratchpad",
) -> Runnable[dict[str, Any], ChatPromptTemplate]:
"""Create a chat prompt from a Langchain schema."""
def runnable_chat_lambda(inputs: dict[str, Any]) -> ChatPromptTemplate:
p = load(path)
parsed = prepare(p, inputs)
# Parsed messages have been templated
# Convert to Message objects to avoid templating attempts in ChatPromptTemplate
lc_messages = []
for message in parsed:
message_class = RoleMap.get_message_class(message["role"])
lc_messages.append(message_class(content=message["content"]))
lc_messages.append(
MessagesPlaceholder(
variable_name=input_name_agent_scratchpad, optional=True
) # type: ignore[arg-type]
)
lc_p = ChatPromptTemplate.from_messages(lc_messages)
lc_p = lc_p.partial(**p.inputs)
return lc_p
return RunnableLambda(runnable_chat_lambda)
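# A minimal usage sketch under stated assumptions: 'chat.prompty' is a hypothetical prompty
# file path and {"question": ...} is assumed to match the inputs that file declares.
if __name__ == "__main__":
    chat_prompt_runnable = create_chat_prompt("chat.prompty")
    prompt = chat_prompt_runnable.invoke({"question": "What is LangChain?"})
    # the agent_scratchpad placeholder is optional, so it can be omitted here
    print(prompt.format_messages())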
|
from typing import Any, Dict
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable, RunnableLambda
from .parsers import RoleMap
from .utils import load, prepare
def create_chat_prompt(
path: str,
input_name_agent_scratchpad: str = "agent_scratchpad",
) -> Runnable[Dict[str, Any], ChatPromptTemplate]:
"""Create a chat prompt from a Langchain schema."""
def runnable_chat_lambda(inputs: Dict[str, Any]) -> ChatPromptTemplate:
p = load(path)
parsed = prepare(p, inputs)
# Parsed messages have been templated
# Convert to Message objects to avoid templating attempts in ChatPromptTemplate
lc_messages = []
for message in parsed:
message_class = RoleMap.get_message_class(message["role"])
lc_messages.append(message_class(content=message["content"]))
lc_messages.append(
MessagesPlaceholder(
variable_name=input_name_agent_scratchpad, optional=True
) # type: ignore[arg-type]
)
lc_p = ChatPromptTemplate.from_messages(lc_messages)
lc_p = lc_p.partial(**p.inputs)
return lc_p
return RunnableLambda(runnable_chat_lambda)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return Config().database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from .audioclip_text import AudioCLIPTextEncoder
|
from .audioclip_text import AudioCLIPTextEncoder
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.documents.legacy.legacy_document import LegacyDocument
__all__ = ['LegacyDocument']
|
from docarray.documents.legacy.legacy_document import LegacyDocument
__all__ = ['LegacyDocument']
|
from ._source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
WAV2VEC2_XLSR_1B,
WAV2VEC2_XLSR_2B,
WAV2VEC2_XLSR_300M,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"WAV2VEC2_XLSR_300M",
"WAV2VEC2_XLSR_1B",
"WAV2VEC2_XLSR_2B",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"WAVLM_BASE",
"WAVLM_BASE_PLUS",
"WAVLM_LARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
"SourceSeparationBundle",
"CONVTASNET_BASE_LIBRI2MIX",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
]
|
from ._source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"WAVLM_BASE",
"WAVLM_BASE_PLUS",
"WAVLM_LARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
"SourceSeparationBundle",
"CONVTASNET_BASE_LIBRI2MIX",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
]
|
from typing import Any, Literal, Optional
import pytest
import re
import respx
import json
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
from llama_index.core.schema import NodeWithScore, Document
@pytest.fixture()
def mock_v1_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https://integrate.api.nvidia.com/v1/models").respond(
json={
"data": [
{
"id": "mock-model",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
}
]
}
)
@pytest.fixture()
def mock_v1_ranking(respx_mock: respx.MockRouter) -> None:
respx_mock.post(
re.compile(r"https://ai\.api\.nvidia\.com/v1/.*/reranking")
).respond(
json={
"rankings": [
{"index": 0, "logit": 4.2},
]
}
)
@pytest.fixture()
def mock(mock_v1_models: None, mock_v1_ranking: None) -> None:
pass
@pytest.mark.parametrize(
"truncate",
[
None,
"END",
"NONE",
],
)
def test_truncate_passed(
mock: None,
respx_mock: respx.MockRouter,
truncate: Optional[Literal["END", "NONE"]],
) -> None:
client = NVIDIARerank(
api_key="BOGUS",
**({"truncate": truncate} if truncate else {}),
)
response = client.postprocess_nodes(
[NodeWithScore(node=Document(text="Nothing really."))],
query_str="What is it?",
)
assert len(response) == 1
assert len(respx.calls) > 0
last_call = list(respx.calls)[-1]
request_payload = json.loads(last_call.request.content.decode("utf-8"))
if truncate is None:
assert "truncate" not in request_payload
else:
assert "truncate" in request_payload
assert request_payload["truncate"] == truncate
@pytest.mark.parametrize("truncate", [True, False, 1, 0, 1.0, "START", "BOGUS"])
def test_truncate_invalid(truncate: Any) -> None:
with pytest.raises(ValueError):
NVIDIARerank(truncate=truncate)
@pytest.mark.integration()
@pytest.mark.parametrize("truncate", ["END"])
def test_truncate_positive(model: str, mode: dict, truncate: str) -> None:
query = "What is acceleration?"
nodes = [
NodeWithScore(node=Document(text="NVIDIA " * length))
for length in [32, 1024, 64, 128, 2048, 256, 512]
]
client = NVIDIARerank(model=model, top_n=len(nodes), truncate=truncate, **mode)
response = client.postprocess_nodes(nodes, query_str=query)
print(response)
assert len(response) == len(nodes)
@pytest.mark.integration()
@pytest.mark.parametrize("truncate", [None, "NONE"])
def test_truncate_negative(model: str, mode: dict, truncate: str) -> None:
if model == "nv-rerank-qa-mistral-4b:1":
pytest.skip(
"truncation is inconsistent across models, "
"nv-rerank-qa-mistral-4b:1 truncates by default "
"while others do not"
)
query = "What is acceleration?"
nodes = [
NodeWithScore(node=Document(text="NVIDIA " * length))
for length in [32, 1024, 64, 128, 2048, 256, 512]
]
client = NVIDIARerank(
model=model, **mode, **({"truncate": truncate} if truncate else {})
)
with pytest.raises(Exception) as e:
client.postprocess_nodes(nodes, query_str=query)
assert "400" in str(e.value)
# assert "exceeds maximum allowed" in str(e.value)
|
from typing import Any, Literal, Optional
import pytest
import re
from requests_mock import Mocker
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
from llama_index.core.schema import NodeWithScore, Document
@pytest.fixture()
def mock_v1_models(requests_mock: Mocker) -> None:
requests_mock.get(
"https://integrate.api.nvidia.com/v1/models",
json={
"data": [
{
"id": "mock-model",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
}
]
},
)
@pytest.fixture()
def mock_v1_ranking(requests_mock: Mocker) -> None:
requests_mock.post(
re.compile(r"https://ai\.api\.nvidia\.com/v1/.*/reranking"),
json={
"rankings": [
{"index": 0, "logit": 4.2},
]
},
)
@pytest.fixture()
def mock(mock_v1_models: None, mock_v1_ranking: None) -> None:
pass
@pytest.mark.parametrize(
"truncate",
[
None,
"END",
"NONE",
],
)
def test_truncate_passed(
mock: None,
requests_mock: Mocker,
truncate: Optional[Literal["END", "NONE"]],
) -> None:
client = NVIDIARerank(
api_key="BOGUS",
**({"truncate": truncate} if truncate else {}),
)
response = client.postprocess_nodes(
[NodeWithScore(node=Document(text="Nothing really."))],
query_str="What is it?",
)
assert len(response) == 1
assert requests_mock.last_request is not None
request_payload = requests_mock.last_request.json()
if truncate is None:
assert "truncate" not in request_payload
else:
assert "truncate" in request_payload
assert request_payload["truncate"] == truncate
@pytest.mark.parametrize("truncate", [True, False, 1, 0, 1.0, "START", "BOGUS"])
def test_truncate_invalid(truncate: Any) -> None:
with pytest.raises(ValueError):
NVIDIARerank(truncate=truncate)
@pytest.mark.integration()
@pytest.mark.parametrize("truncate", ["END"])
def test_truncate_positive(model: str, mode: dict, truncate: str) -> None:
query = "What is acceleration?"
nodes = [
NodeWithScore(node=Document(text="NVIDIA " * length))
for length in [32, 1024, 64, 128, 2048, 256, 512]
]
client = NVIDIARerank(model=model, top_n=len(nodes), truncate=truncate, **mode)
response = client.postprocess_nodes(nodes, query_str=query)
print(response)
assert len(response) == len(nodes)
@pytest.mark.integration()
@pytest.mark.parametrize("truncate", [None, "NONE"])
def test_truncate_negative(model: str, mode: dict, truncate: str) -> None:
if model == "nv-rerank-qa-mistral-4b:1":
pytest.skip(
"truncation is inconsistent across models, "
"nv-rerank-qa-mistral-4b:1 truncates by default "
"while others do not"
)
query = "What is acceleration?"
nodes = [
NodeWithScore(node=Document(text="NVIDIA " * length))
for length in [32, 1024, 64, 128, 2048, 256, 512]
]
client = NVIDIARerank(
model=model, **mode, **({"truncate": truncate} if truncate else {})
)
with pytest.raises(Exception) as e:
client.postprocess_nodes(nodes, query_str=query)
assert "400" in str(e.value)
# assert "exceeds maximum allowed" in str(e.value)
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_4.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
neck=dict(
type='FPN',
in_channels=[80, 240, 560, 1360],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_4.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
neck=dict(
type='FPN',
in_channels=[80, 240, 560, 1360],
out_channels=256,
num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
"""Patentsview reader that reads patent abstract."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
BASE_URL = "https://api.patentsview.org/patents/query"
class PatentsviewReader(BaseReader):
"""
Patentsview reader.
    Reads patent abstracts.
"""
def __init__(self) -> None:
"""Initialize with request body."""
self.json = {"q": {"patent_id": None}, "f": ["patent_abstract"]}
def load_data(self, patent_number: List[str]) -> List[Document]:
"""
        Load patent abstracts given a list of patent numbers.
Args:
patent_number: List[str]: List of patent numbers, e.g., 8848839.
        Returns:
List[Document]: A list of Document objects, each including the abstract for a patent.
"""
if not patent_number:
raise ValueError("Please input patent number")
self.json["q"]["patent_id"] = patent_number
response = requests.post(BASE_URL, json=self.json)
if response.status_code == 200:
data = response.json()
patents = data.get("patents", [])
results = []
for patent in patents:
results.append(Document(text=patent["patent_abstract"]))
else:
raise Exception(f"Request failed with status code: {response.status_code}")
return results
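# Example usage (the patent number is the one given in the docstring above;
# this performs a live request against the Patentsview API):
#
#     reader = PatentsviewReader()
#     documents = reader.load_data(["8848839"])
#     print(documents[0].text)  # the patent abstract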
|
"""Patentsview reader that reads patent abstract."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
BASE_URL = "https://api.patentsview.org/patents/query"
class PatentsviewReader(BaseReader):
"""
Patentsview reader.
    Reads patent abstracts.
"""
def __init__(self) -> None:
"""Initialize with request body."""
self.json = {"q": {"patent_id": None}, "f": ["patent_abstract"]}
def load_data(self, patent_number: List[str]) -> List[Document]:
"""
        Load patent abstracts given a list of patent numbers.
Args:
patent_number: List[str]: List of patent numbers, e.g., 8848839.
        Returns:
List[Document]: A list of Document objects, each including the abstract for a patent.
"""
if not patent_number:
raise ValueError("Please input patent number")
self.json["q"]["patent_id"] = patent_number
response = requests.post(BASE_URL, json=self.json)
if response.status_code == 200:
data = response.json()
patents = data.get("patents", [])
results = []
for patent in patents:
results.append(Document(text=patent["patent_abstract"]))
else:
raise Exception(f"Request failed with status code: {response.status_code}")
return results
|
from typing import Optional
import torch
from ..modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ..utils import logging
logger = logging.get_logger(__name__)
_use_top_left_mask = flash_attn_supports_top_left_mask()
def flash_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
sliding_window: Optional[int] = None,
softcap: Optional[float] = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False) or kwargs.get("head_mask", None) is not None:
logger.warning_once(
"`flash_attention_2` does not support `output_attentions=True` or `head_mask`."
" Please set your attention to `eager` if you want any of these features."
)
# This is before the transpose
seq_len = query.shape[2]
# FA2 uses non-transposed inputs
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
    # In PEFT, the layer norms are usually cast to float32 for training stability,
    # so the input hidden states get silently cast to float32. Hence, we need to
    # cast them back to the correct dtype just to be sure everything works as expected.
    # This might slow down training & inference, so it is recommended not to cast the
    # LayerNorms to fp32. (usually our RMSNorm modules handle it correctly)
target_dtype = None
if query.dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(module.config, "_pre_quantization_dtype"):
target_dtype = module.config._pre_quantization_dtype
else:
target_dtype = next(layer for layer in module.modules() if isinstance(layer, torch.nn.Linear)).weight.dtype
# FA2 always relies on the value set in the module, so remove it if present in kwargs to avoid passing it twice
kwargs.pop("is_causal", None)
attn_output = _flash_attention_forward(
query,
key,
value,
attention_mask,
query_length=seq_len,
is_causal=module.is_causal,
dropout=dropout,
softmax_scale=scaling,
sliding_window=sliding_window,
softcap=softcap,
use_top_left_mask=_use_top_left_mask,
target_dtype=target_dtype,
**kwargs,
)
return attn_output, None
|
from typing import Optional, Tuple
import torch
from ..modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ..utils import logging
logger = logging.get_logger(__name__)
_use_top_left_mask = flash_attn_supports_top_left_mask()
def flash_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
sliding_window: Optional[int] = None,
softcap: Optional[float] = None,
**kwargs,
) -> Tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False) or kwargs.get("head_mask", None) is not None:
logger.warning_once(
"`flash_attention_2` does not support `output_attentions=True` or `head_mask`."
" Please set your attention to `eager` if you want any of these features."
)
# This is before the transpose
seq_len = query.shape[2]
# FA2 uses non-transposed inputs
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
    # In PEFT, the layer norms are usually cast to float32 for training stability,
    # so the input hidden states get silently cast to float32. Hence, we need to
    # cast them back to the correct dtype just to be sure everything works as expected.
    # This might slow down training & inference, so it is recommended not to cast the
    # LayerNorms to fp32. (usually our RMSNorm modules handle it correctly)
target_dtype = None
if query.dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(module.config, "_pre_quantization_dtype"):
target_dtype = module.config._pre_quantization_dtype
else:
target_dtype = next(layer for layer in module.modules() if isinstance(layer, torch.nn.Linear)).weight.dtype
# FA2 always relies on the value set in the module, so remove it if present in kwargs to avoid passing it twice
kwargs.pop("is_causal", None)
attn_output = _flash_attention_forward(
query,
key,
value,
attention_mask,
query_length=seq_len,
is_causal=module.is_causal,
dropout=dropout,
softmax_scale=scaling,
sliding_window=sliding_window,
softcap=softcap,
use_top_left_mask=_use_top_left_mask,
target_dtype=target_dtype,
**kwargs,
)
return attn_output, None
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
import os
from . import InputExample
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
    This reader can, for example, be used with the BatchHardTripletLoss.
    Labels are automatically mapped to integers.
"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
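# A minimal usage sketch (folder and file name are hypothetical; each line of the
# file is expected to contain "<label><separator><sentence>", tab-separated by default):
#
#     reader = LabelSentenceReader("data")
#     examples = reader.get_examples("train.tsv", max_examples=1000)
#     # each entry is an InputExample with a single text and an integer label id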
|
from __future__ import annotations
import os
from . import InputExample
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
    This reader can, for example, be used with the BatchHardTripletLoss.
    Labels are automatically mapped to integers.
"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'DABDETRHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .masktrack_rcnn_tracker import MaskTrackRCNNTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
__all__ = [
'BaseTracker', 'ByteTracker', 'QuasiDenseTracker', 'SORTTracker',
'MaskTrackRCNNTracker'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
__all__ = ['BaseTracker', 'ByteTracker', 'QuasiDenseTracker', 'SORTTracker']
|
"""Evaluation metrics for cluster analysis results.
- Supervised evaluation uses ground truth class values for each sample.
- Unsupervised evaluation does not use ground truths and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bicluster import consensus_score
from ._supervised import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
entropy,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from ._unsupervised import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
__all__ = [
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"adjusted_rand_score",
"rand_score",
"completeness_score",
"pair_confusion_matrix",
"contingency_matrix",
"expected_mutual_information",
"homogeneity_completeness_v_measure",
"homogeneity_score",
"mutual_info_score",
"v_measure_score",
"fowlkes_mallows_score",
"entropy",
"silhouette_samples",
"silhouette_score",
"calinski_harabasz_score",
"davies_bouldin_score",
"consensus_score",
]
|
"""Evaluation metrics for cluster analysis results.
- Supervised evaluation uses ground truth class values for each sample.
- Unsupervised evaluation does not use ground truths and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bicluster import consensus_score
from ._supervised import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
entropy,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from ._unsupervised import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
__all__ = [
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"adjusted_rand_score",
"rand_score",
"completeness_score",
"pair_confusion_matrix",
"contingency_matrix",
"expected_mutual_information",
"homogeneity_completeness_v_measure",
"homogeneity_score",
"mutual_info_score",
"v_measure_score",
"fowlkes_mallows_score",
"entropy",
"silhouette_samples",
"silhouette_score",
"calinski_harabasz_score",
"davies_bouldin_score",
"consensus_score",
]
|
import multiprocessing
import pytest
from jina import DocumentArray, Executor, requests
from jina.parsers import set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.streamer import GatewayStreamer
class StreamerTestExecutor(Executor):
@requests
def foo(self, docs, parameters, **kwargs):
text_to_add = parameters.get('text_to_add', 'default ')
for doc in docs:
doc.text += text_to_add
def _create_worker_runtime(port, name=''):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
args.uses = 'StreamerTestExecutor'
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _setup(pod0_port, pod1_port):
pod0_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod0_port,)
)
pod0_process.start()
pod1_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod1_port,)
)
pod1_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod0_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod1_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return pod0_process, pod1_process
@pytest.mark.parametrize(
'parameters, target_executor, expected_text',
[ # (None, None, 'default default '),
({'pod0__text_to_add': 'param_pod0 '}, None, 'param_pod0 default '),
(None, 'pod1', 'default '),
({'pod0__text_to_add': 'param_pod0 '}, 'pod0', 'param_pod0 '),
],
)
@pytest.mark.parametrize('results_in_order', [False, True])
@pytest.mark.asyncio
async def test_custom_gateway(
port_generator, parameters, target_executor, expected_text, results_in_order
):
pod0_port = port_generator()
pod1_port = port_generator()
pod0_process, pod1_process = _setup(pod0_port, pod1_port)
graph_description = {
"start-gateway": ["pod0"],
"pod0": ["pod1"],
"pod1": ["end-gateway"],
}
pod_addresses = {"pod0": [f"0.0.0.0:{pod0_port}"], "pod1": [f"0.0.0.0:{pod1_port}"]}
# send requests to the gateway
gateway_streamer = GatewayStreamer(
graph_representation=graph_description, executor_addresses=pod_addresses
)
try:
input_da = DocumentArray.empty(60)
resp = DocumentArray.empty(0)
num_resp = 0
async for r in gateway_streamer.stream_docs(
docs=input_da,
request_size=10,
parameters=parameters,
target_executor=target_executor,
results_in_order=results_in_order
):
num_resp += 1
resp.extend(r)
assert num_resp == 6
assert len(resp) == 60
for doc in resp:
assert doc.text == expected_text
except Exception:
assert False
finally: # clean up runtimes
pod0_process.terminate()
pod1_process.terminate()
pod0_process.join()
pod1_process.join()
await gateway_streamer.close()
|
import multiprocessing
import pytest
from jina import DocumentArray, Executor, requests
from jina.parsers import set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.streamer import GatewayStreamer
class StreamerTestExecutor(Executor):
@requests
def foo(self, docs, parameters, **kwargs):
text_to_add = parameters.get('text_to_add', 'default ')
for doc in docs:
doc.text += text_to_add
def _create_worker_runtime(port, name=''):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
args.uses = 'StreamerTestExecutor'
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _setup(pod0_port, pod1_port):
pod0_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod0_port,)
)
pod0_process.start()
pod1_process = multiprocessing.Process(
target=_create_worker_runtime, args=(pod1_port,)
)
pod1_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod0_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{pod1_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return pod0_process, pod1_process
@pytest.mark.parametrize(
'parameters, target_executor, expected_text',
[ # (None, None, 'default default '),
({'pod0__text_to_add': 'param_pod0 '}, None, 'param_pod0 default '),
(None, 'pod1', 'default '),
({'pod0__text_to_add': 'param_pod0 '}, 'pod0', 'param_pod0 '),
],
)
@pytest.mark.asyncio
async def test_custom_gateway(
port_generator, parameters, target_executor, expected_text
):
pod0_port = port_generator()
pod1_port = port_generator()
pod0_process, pod1_process = _setup(pod0_port, pod1_port)
graph_description = {
"start-gateway": ["pod0"],
"pod0": ["pod1"],
"pod1": ["end-gateway"],
}
pod_addresses = {"pod0": [f"0.0.0.0:{pod0_port}"], "pod1": [f"0.0.0.0:{pod1_port}"]}
# send requests to the gateway
gateway_streamer = GatewayStreamer(
graph_representation=graph_description, executor_addresses=pod_addresses
)
try:
input_da = DocumentArray.empty(60)
resp = DocumentArray.empty(0)
num_resp = 0
async for r in gateway_streamer.stream_docs(
docs=input_da,
request_size=10,
parameters=parameters,
target_executor=target_executor,
):
num_resp += 1
resp.extend(r)
assert num_resp == 6
assert len(resp) == 60
for doc in resp:
assert doc.text == expected_text
except Exception:
assert False
finally: # clean up runtimes
pod0_process.terminate()
pod1_process.terminate()
pod0_process.join()
pod1_process.join()
await gateway_streamer.close()
|
from typing import Union, Optional, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _insert_doc_at_idx(self, doc, idx: Optional[int] = None):
if idx is None:
idx = len(self)
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, idx),
)
self._offset2ids.insert(idx, doc.id)
def _shift_index_right_backward(self, start: int):
idx = len(self) - 1
while idx >= start:
self._sql(
f'UPDATE {self._table_name} SET item_order = ? WHERE item_order = ?',
(idx + 1, idx),
)
idx -= 1
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
"""
length = len(self)
if index < 0:
index = length + index
index = max(0, min(length, index))
self._shift_index_right_backward(index)
self._insert_doc_at_idx(doc=value, idx=index)
self._commit()
def _append(self, doc: 'Document', commit: bool = True, **kwargs) -> None:
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, len(self)),
)
self._offset2ids.append(doc.id)
if commit:
self._commit()
def __contains__(self, item: Union[str, 'Document']):
if isinstance(item, str):
r = self._sql(f'SELECT 1 FROM {self._table_name} WHERE doc_id=?', (item,))
return len(list(r)) > 0
elif isinstance(item, Document):
return item.id in self # fall back to str check
else:
return False
def __len__(self) -> int:
request = self._sql(f'SELECT COUNT(*) FROM {self._table_name}')
return request.fetchone()[0]
def __repr__(self):
return f'<DocumentArray[SQLite] (length={len(self)}) at {id(self)}>'
def __eq__(self, other):
"""In sqlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def _extend(self, docs: Iterable['Document'], **kwargs) -> None:
for doc in docs:
self._append(doc, commit=False)
self._commit()
|
from typing import Union, Optional, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _insert_doc_at_idx(self, doc, idx: Optional[int] = None):
if idx is None:
idx = len(self)
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, idx),
)
self._offset2ids.insert(idx, doc.id)
def _shift_index_right_backward(self, start: int):
idx = len(self) - 1
while idx >= start:
self._sql(
f'UPDATE {self._table_name} SET item_order = ? WHERE item_order = ?',
(idx + 1, idx),
)
idx -= 1
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
"""
length = len(self)
if index < 0:
index = length + index
index = max(0, min(length, index))
self._shift_index_right_backward(index)
self._insert_doc_at_idx(doc=value, idx=index)
self._commit()
def _append(self, doc: 'Document', commit: bool = True, **kwargs) -> None:
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, len(self)),
)
self._offset2ids.append(doc.id)
if commit:
self._commit()
def __del__(self) -> None:
super().__del__()
if not self._persist:
self._sql(
'DELETE FROM metadata WHERE table_name=? AND container_type=?',
(self._table_name, self.__class__.__name__),
)
self._sql(f'DROP TABLE IF EXISTS {self._table_name}')
self._commit()
def __contains__(self, item: Union[str, 'Document']):
if isinstance(item, str):
r = self._sql(f'SELECT 1 FROM {self._table_name} WHERE doc_id=?', (item,))
return len(list(r)) > 0
elif isinstance(item, Document):
return item.id in self # fall back to str check
else:
return False
def __len__(self) -> int:
request = self._sql(f'SELECT COUNT(*) FROM {self._table_name}')
return request.fetchone()[0]
def __repr__(self):
return f'<DocumentArray[SQLite] (length={len(self)}) at {id(self)}>'
def __eq__(self, other):
"""In sqlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def _extend(self, docs: Iterable['Document'], **kwargs) -> None:
for doc in docs:
self._append(doc, commit=False)
self._commit()
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
|
import numpy as np
import scipy.signal
from keras.src import backend
from keras.src import initializers
from keras.src import testing
class ConstantInitializersTest(testing.TestCase):
def test_zeros_initializer(self):
shape = (3, 3)
initializer = initializers.Zeros()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.zeros(shape=shape))
self.run_class_serialization_test(initializer)
def test_ones_initializer(self):
shape = (3, 3)
initializer = initializers.Ones()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.ones(shape=shape))
self.run_class_serialization_test(initializer)
def test_constant_initializer(self):
shape = (3, 3)
constant_value = 6.0
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_constant_initializer_array_value(self):
shape = (3, 3)
constant_value = np.random.random((3, 3))
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_identity_initializer(self):
shape = (3, 3)
gain = 2
initializer = initializers.Identity(gain=gain)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.eye(*shape) * gain)
self.run_class_serialization_test(initializer)
def test_stft_initializer(self):
shape = (256, 1, 513)
time_range = np.arange(256).reshape((-1, 1, 1))
freq_range = (np.arange(513) / 1024.0).reshape((1, 1, -1))
pi = np.arccos(np.float64(-1))
args = -2 * pi * time_range * freq_range
tol_kwargs = {}
if backend.backend() == "jax":
# TODO(mostafa-mahmoud): investigate the cases
# of non-small error in jax and torch
tol_kwargs = {"atol": 1e-4, "rtol": 1e-6}
initializer = initializers.STFTInitializer("real", None)
values = backend.convert_to_numpy(initializer(shape))
self.assertAllClose(np.cos(args), values, atol=1e-4)
self.run_class_serialization_test(initializer)
initializer = initializers.STFTInitializer(
"real",
"hamming",
None,
True,
)
window = scipy.signal.windows.get_window("hamming", 256, True)
window = window.astype("float64").reshape((-1, 1, 1))
values = backend.convert_to_numpy(initializer(shape, "float64"))
self.assertAllClose(np.cos(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
initializer = initializers.STFTInitializer(
"imag",
"tukey",
"density",
False,
)
window = scipy.signal.windows.get_window("tukey", 256, False)
window = window.astype("float64").reshape((-1, 1, 1))
window = window / np.sqrt(np.sum(window**2))
values = backend.convert_to_numpy(initializer(shape, "float64"))
self.assertAllClose(np.sin(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
initializer = initializers.STFTInitializer(
"imag",
list(range(1, 257)),
"spectrum",
)
window = np.arange(1, 257)
window = window.astype("float64").reshape((-1, 1, 1))
window = window / np.sum(window)
values = backend.convert_to_numpy(initializer(shape, "float64"))
self.assertAllClose(np.sin(args) * window, values, **tol_kwargs)
self.run_class_serialization_test(initializer)
with self.assertRaises(ValueError):
initializers.STFTInitializer("imaginary")
with self.assertRaises(ValueError):
initializers.STFTInitializer("real", scaling="l2")
with self.assertRaises(ValueError):
initializers.STFTInitializer("real", window="unknown")
|
import numpy as np
from keras.src import backend
from keras.src import initializers
from keras.src import testing
class ConstantInitializersTest(testing.TestCase):
def test_zeros_initializer(self):
shape = (3, 3)
initializer = initializers.Zeros()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.zeros(shape=shape))
self.run_class_serialization_test(initializer)
def test_ones_initializer(self):
shape = (3, 3)
initializer = initializers.Ones()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.ones(shape=shape))
self.run_class_serialization_test(initializer)
def test_constant_initializer(self):
shape = (3, 3)
constant_value = 6.0
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_constant_initializer_array_value(self):
shape = (3, 3)
constant_value = np.random.random((3, 3))
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_identity_initializer(self):
shape = (3, 3)
gain = 2
initializer = initializers.Identity(gain=gain)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.eye(*shape) * gain)
self.run_class_serialization_test(initializer)
|
from abc import ABC
class BaseStandardTests(ABC):
"""
:private:
"""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""
Test that no standard tests are overridden.
:private:
"""
# find path to standard test implementations
comparison_class = None
def explore_bases(cls: type) -> None:
nonlocal comparison_class
for base in cls.__bases__:
if base.__module__.startswith("langchain_tests."):
if comparison_class is None:
comparison_class = base
else:
raise ValueError(
"Multiple standard test base classes found: "
f"{comparison_class}, {base}"
)
else:
explore_bases(base)
explore_bases(self.__class__)
assert comparison_class is not None, "No standard test base class found."
print(f"Comparing {self.__class__} to {comparison_class}") # noqa: T201
running_tests = set(
[method for method in dir(self) if method.startswith("test_")]
)
base_tests = set(
[method for method in dir(comparison_class) if method.startswith("test_")]
)
deleted_tests = base_tests - running_tests
assert not deleted_tests, f"Standard tests deleted: {deleted_tests}"
overridden_tests = [
method
for method in base_tests
if getattr(self.__class__, method) is not getattr(comparison_class, method)
]
def is_xfail(method: str) -> bool:
m = getattr(self.__class__, method)
if not hasattr(m, "pytestmark"):
return False
marks = m.pytestmark
return any(
mark.name == "xfail" and mark.kwargs.get("reason") for mark in marks
)
overridden_not_xfail = [
method for method in overridden_tests if not is_xfail(method)
]
assert not overridden_not_xfail, (
"Standard tests overridden without "
f'@pytest.mark.xfail(reason="..."): {overridden_not_xfail}\n'
"Note: reason is required to explain why the standard test has an expected "
"failure."
)
|
from abc import ABC
from typing import Type
class BaseStandardTests(ABC):
"""
:private:
"""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""
Test that no standard tests are overridden.
:private:
"""
# find path to standard test implementations
comparison_class = None
def explore_bases(cls: Type) -> None:
nonlocal comparison_class
for base in cls.__bases__:
if base.__module__.startswith("langchain_tests."):
if comparison_class is None:
comparison_class = base
else:
raise ValueError(
"Multiple standard test base classes found: "
f"{comparison_class}, {base}"
)
else:
explore_bases(base)
explore_bases(self.__class__)
assert comparison_class is not None, "No standard test base class found."
print(f"Comparing {self.__class__} to {comparison_class}") # noqa: T201
running_tests = set(
[method for method in dir(self) if method.startswith("test_")]
)
base_tests = set(
[method for method in dir(comparison_class) if method.startswith("test_")]
)
deleted_tests = base_tests - running_tests
assert not deleted_tests, f"Standard tests deleted: {deleted_tests}"
overridden_tests = [
method
for method in base_tests
if getattr(self.__class__, method) is not getattr(comparison_class, method)
]
def is_xfail(method: str) -> bool:
m = getattr(self.__class__, method)
if not hasattr(m, "pytestmark"):
return False
marks = m.pytestmark
return any(
mark.name == "xfail" and mark.kwargs.get("reason") for mark in marks
)
overridden_not_xfail = [
method for method in overridden_tests if not is_xfail(method)
]
assert not overridden_not_xfail, (
"Standard tests overridden without "
f'@pytest.mark.xfail(reason="..."): {overridden_not_xfail}\n'
"Note: reason is required to explain why the standard test has an expected "
"failure."
)
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.prototype.transforms.utils import is_simple_tensor
class LabelToOneHot(Transform):
_transformed_types = (datapoints.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def _transform(self, inpt: datapoints.Label, params: Dict[str, Any]) -> datapoints.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt.as_subclass(torch.Tensor), num_classes=num_categories)
return datapoints.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
class PILToTensor(Transform):
_transformed_types = (PIL.Image.Image,)
    def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image_tensor(inpt)
class ToImagePIL(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align it with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
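# Usage sketch (illustrative, not from the original file); assumes the prototype
# `datapoints.Label` constructor accepts integer data plus a `categories=` keyword.
#
#     label = datapoints.Label(torch.tensor([0, 2]), categories=["cat", "dog", "bird"])
#     one_hot = LabelToOneHot()(label)   # expected shape (2, 3), one row per label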
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.prototype.transforms.utils import is_simple_tensor
class LabelToOneHot(Transform):
_transformed_types = (datapoints.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def _transform(self, inpt: datapoints.Label, params: Dict[str, Any]) -> datapoints.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt.as_subclass(torch.Tensor), num_classes=num_categories)
return datapoints.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
class PILToTensor(Transform):
_transformed_types = (PIL.Image.Image,)
    def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image_tensor(inpt) # type: ignore[no-any-return]
class ToImagePIL(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align it with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
"""
Utility Tools for the Portkey Class.
This module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class.
"""
from typing import TYPE_CHECKING, List
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.llms.anthropic import Anthropic
from llama_index.llms.anthropic.utils import CLAUDE_MODELS
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import (
AZURE_TURBO_MODELS,
GPT3_5_MODELS,
GPT3_MODELS,
GPT4_MODELS,
TURBO_MODELS,
)
if TYPE_CHECKING:
from portkey import (
LLMOptions,
PortkeyResponse,
)
IMPORT_ERROR_MESSAGE = (
"Portkey is not installed.Please install it with `pip install portkey-ai`."
)
DISCONTINUED_MODELS = {
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
DEFAULT_MODEL = "gpt-3.5-turbo"
AVAILABLE_INTEGRATIONS = (OpenAI, Anthropic)
CLUADE_MODEL_FULLVERSION_MAP = {
"claude-instant-1": "claude-instant-1.2",
"claude-2": "claude-2.0",
}
ALL_AVAILABLE_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**GPT3_5_MODELS,
**GPT3_MODELS,
**AZURE_TURBO_MODELS,
**CLAUDE_MODELS,
}
CHAT_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**AZURE_TURBO_MODELS,
}
def is_chat_model(model: str) -> bool:
"""
Check if a given model is a chat-based language model.
This function takes a model name or identifier as input and determines whether
the model is designed for chat-based language generation, conversation, or
interaction.
Args:
model (str): The name or identifier of the model to be checked.
Returns:
bool: True if the provided model is a chat-based language model,
False otherwise.
"""
return model in CHAT_MODELS
def modelname_to_contextsize(modelname: str) -> int:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = modelname_to_contextsize("text-davinci-003")
"""
# handling finetuned models
if "ft-" in modelname: # legacy fine-tuning
modelname = modelname.split(":")[0]
elif modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Model {modelname} has been discontinued. " "Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def generate_llm_metadata(llm: "LLMOptions") -> LLMMetadata:
"""
Generate metadata for a Language Model (LLM) instance.
This function takes an instance of a Language Model (LLM) and generates
metadata based on the provided instance. The metadata includes information
such as the context window, number of output tokens, chat model status,
and model name.
Parameters
----------
llm (LLM): An instance of a Language Model (LLM) from which metadata
will be generated.
Returns
-------
LLMMetadata: A data structure containing metadata attributes such as
context window, number of output tokens, chat model status, and
model name.
Raises
------
    ValueError: If the provided 'llm' is not an instance of
        portkey.LLMOptions.
"""
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
if not isinstance(llm, LLMOptions):
raise ValueError("llm must be an instance of portkey.LLMOptions")
return LLMMetadata(
_context_window=modelname_to_contextsize(llm.model or ""),
is_chat_model=is_chat_model(llm.model or ""),
model_name=llm.model,
)
def get_llm(response: "PortkeyResponse", llms: List["LLMOptions"]) -> "LLMOptions":
# TODO: Update this logic over here.
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
fallback_llm = LLMOptions.construct()
for llm in llms:
model = llm.model
if model == response.model:
fallback_llm = llm
break
if fallback_llm is None:
raise ValueError("Failed to get the fallback LLM")
return fallback_llm
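# Illustrative usage (not part of the original module): both helpers are plain
# dictionary lookups over the model tables assembled above.
#
#     is_chat_model("gpt-3.5-turbo")                # True, present in CHAT_MODELS
#     modelname_to_contextsize("gpt-3.5-turbo")     # context size from TURBO_MODELS (e.g. 4096)
#     modelname_to_contextsize("code-davinci-002")  # raises ValueError: discontinued model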
|
"""
Utility Tools for the Portkey Class.
This module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class.
"""
from typing import TYPE_CHECKING, List
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.llms.anthropic import Anthropic
from llama_index.llms.anthropic.utils import CLAUDE_MODELS
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import (
AZURE_TURBO_MODELS,
GPT3_5_MODELS,
GPT3_MODELS,
GPT4_MODELS,
TURBO_MODELS,
)
if TYPE_CHECKING:
from portkey import (
LLMOptions,
PortkeyResponse,
)
IMPORT_ERROR_MESSAGE = (
"Portkey is not installed.Please install it with `pip install portkey-ai`."
)
DISCONTINUED_MODELS = {
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
DEFAULT_MODEL = "gpt-3.5-turbo"
AVAILABLE_INTEGRATIONS = (OpenAI, Anthropic)
CLUADE_MODEL_FULLVERSION_MAP = {
"claude-instant-1": "claude-instant-1.2",
"claude-2": "claude-2.0",
}
ALL_AVAILABLE_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**GPT3_5_MODELS,
**GPT3_MODELS,
**AZURE_TURBO_MODELS,
**CLAUDE_MODELS,
}
CHAT_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**AZURE_TURBO_MODELS,
}
def is_chat_model(model: str) -> bool:
"""Check if a given model is a chat-based language model.
This function takes a model name or identifier as input and determines whether
the model is designed for chat-based language generation, conversation, or
interaction.
Args:
model (str): The name or identifier of the model to be checked.
Returns:
bool: True if the provided model is a chat-based language model,
False otherwise.
"""
return model in CHAT_MODELS
def modelname_to_contextsize(modelname: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = modelname_to_contextsize("text-davinci-003")
"""
# handling finetuned models
if "ft-" in modelname: # legacy fine-tuning
modelname = modelname.split(":")[0]
elif modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Model {modelname} has been discontinued. " "Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def generate_llm_metadata(llm: "LLMOptions") -> LLMMetadata:
"""
Generate metadata for a Language Model (LLM) instance.
This function takes an instance of a Language Model (LLM) and generates
metadata based on the provided instance. The metadata includes information
such as the context window, number of output tokens, chat model status,
and model name.
Parameters:
llm (LLM): An instance of a Language Model (LLM) from which metadata
will be generated.
Returns:
LLMMetadata: A data structure containing metadata attributes such as
context window, number of output tokens, chat model status, and
model name.
Raises:
        ValueError: If the provided 'llm' is not an instance of
            portkey.LLMOptions.
"""
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
if not isinstance(llm, LLMOptions):
raise ValueError("llm must be an instance of portkey.LLMOptions")
return LLMMetadata(
_context_window=modelname_to_contextsize(llm.model or ""),
is_chat_model=is_chat_model(llm.model or ""),
model_name=llm.model,
)
def get_llm(response: "PortkeyResponse", llms: List["LLMOptions"]) -> "LLMOptions":
# TODO: Update this logic over here.
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
fallback_llm = LLMOptions.construct()
for llm in llms:
model = llm.model
if model == response.model:
fallback_llm = llm
break
if fallback_llm is None:
raise ValueError("Failed to get the fallback LLM")
return fallback_llm
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
|
from unittest import TestCase
import numpy as np
from mmengine.registry import init_default_scope
from mmdet.registry import TASK_UTILS
class TestKalmanFilter(TestCase):
@classmethod
def setUpClass(cls):
init_default_scope('mmdet')
motion = dict(type='KalmanFilter', )
cls.kf = TASK_UTILS.build(motion)
def test_init(self):
pred_det = np.random.randn(4)
mean, covariance = self.kf.initiate(pred_det)
assert len(mean) == 8
assert covariance.shape == (8, 8)
def test_predict(self):
mean = np.random.randn(8)
covariance = np.random.randn(8, 8)
mean, covariance = self.kf.predict(mean, covariance)
assert len(mean) == 8
assert covariance.shape == (8, 8)
def test_update(self):
mean = np.ones(8)
covariance = np.ones((8, 8))
measurement = np.ones(4)
score = 0.1
mean, covariance = self.kf.update(mean, covariance, measurement, score)
assert len(mean) == 8
assert covariance.shape == (8, 8)
|
from unittest import TestCase
import numpy as np
from mmdet.registry import TASK_UTILS
from mmdet.utils import register_all_modules
class TestKalmanFilter(TestCase):
@classmethod
def setUpClass(cls):
register_all_modules()
motion = dict(type='KalmanFilter', )
cls.kf = TASK_UTILS.build(motion)
def test_init(self):
pred_det = np.random.randn(4)
mean, covariance = self.kf.initiate(pred_det)
assert len(mean) == 8
assert covariance.shape == (8, 8)
def test_predict(self):
mean = np.random.randn(8)
covariance = np.random.randn(8, 8)
mean, covariance = self.kf.predict(mean, covariance)
assert len(mean) == 8
assert covariance.shape == (8, 8)
def test_update(self):
mean = np.ones(8)
covariance = np.ones((8, 8))
measurement = np.ones(4)
score = 0.1
mean, covariance = self.kf.update(mean, covariance, measurement, score)
assert len(mean) == 8
assert covariance.shape == (8, 8)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
'SCNetBBoxHead'
]
|
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
'SCNetBBoxHead'
]
|
from .paddle_image import ImagePaddlehubEncoder
|
from .paddle_image import ImagePaddlehubEncoder
|
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
from pathlib import Path
import sys
from sys import platform
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
import argparse
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.retries):
with TimeContext(
f'ping {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
r = WorkerRuntime.is_ready(args.host)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, retry (%d/%d) in 1s'
% (j + 1, args.retries)
)
else:
total_success += 1
total_time += tc.duration
time.sleep(1)
if total_success < args.retries:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.retries) * 100,
args.retries - total_success,
args.retries,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
exit(0)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
|
import argparse
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina.logging.profile import TimeContext
from jina.serve.runtimes.worker import WorkerRuntime
ctrl_addr = f'{args.host}:{args.port}'
try:
total_time = 0
total_success = 0
for j in range(args.retries):
with TimeContext(
f'ping {ctrl_addr} at {j} round', default_logger
) as tc:
r = WorkerRuntime.is_ready(ctrl_addr)
if not r:
default_logger.warning(
'not responding, retry (%d/%d) in 1s'
% (j + 1, args.retries)
)
else:
total_success += 1
total_time += tc.duration
time.sleep(1)
if total_success < args.retries:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.retries) * 100,
args.retries - total_success,
args.retries,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
exit(0)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
def dry_run_checker(args: 'argparse.Namespace'):
"""
call dry run on the given endpoint
:param args: args provided by the CLI.
"""
# No retry mechanism for dry run since it is built in the Flow
from jina import Client
client = Client(host=args.host)
try:
if client.dry_run(timeout=args.timeout):
default_logger.info('dry run successful')
exit(0)
else:
default_logger.warning('dry run failed')
exit(1)
except KeyboardInterrupt:
pass
    # returns 1 (anomaly) when it comes to here
    exit(1)
|
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
print(f' metrics {metrics.keys()}')
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
print(f' metrics {metrics.keys()}')
gateway_metrics = metrics['gateway/rep-0']['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0']['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
class SecondExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
with Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
).add(name='first_exec', uses=FirstExec).add(
name="second_exec", uses=SecondExec
) as f:
f.post('/')
collect_metrics()
metrics = read_metrics()
print(f' metrics {metrics.keys()}')
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'address'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][0]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sending_request_seconds'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_sent_request_bytes'][1]['attributes']
)
assert (
'deployment'
in gateway_metric_data_point['jina_received_response_bytes'][1]['attributes']
)
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'first_exec', 'second_exec'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
def test_merge_with_no_reduce(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
f = (
Flow(
tracing=False,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=4317,
port=12345,
)
.add(name='name1')
.add(name='name2', needs=['gateway'])
.add(name='name3', needs=['name1', 'name2'], disable_reduce=True)
)
with f:
f.post('/')
collect_metrics()
metrics = read_metrics()
gateway_metrics = metrics['gateway/rep-0'][0]['resource_metrics'][0][
'scope_metrics'
][0]['metrics']
gateway_metric_data_point = {
i['name']: i['data']['data_points'] for i in gateway_metrics
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_received_response_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sent_request_bytes']
}
assert {'name1', 'name2', 'name3'} == {
i['attributes']['deployment']
for i in gateway_metric_data_point['jina_sending_request_seconds']
}
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
    :class:`SpacyTextEncoder` encodes ``Document`` using models offered by spaCy.
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
        :param traversal_paths: fallback traversal path in case there is no traversal path sent in the request
        :param batch_size: fallback batch size in case there is no batch size sent in the request
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.batch_size)
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=batch_size,
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
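# Usage sketch (illustrative, not part of the original file); assumes a standard
# jina Flow and the default `en_core_web_sm` pipeline downloaded in __init__.
#
#     from jina import Document, Flow
#
#     with Flow().add(uses=SpacyTextEncoder) as f:
#         docs = f.post('/', inputs=DocumentArray([Document(text='hello world')]))
#         print(docs[0].embedding.shape)   # e.g. (96,) for en_core_web_sm vectors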
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
    :class:`SpacyTextEncoder` encodes ``Document`` using models offered by spaCy.
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
        :param traversal_paths: fallback traversal path in case there is no traversal path sent in the request
        :param batch_size: fallback batch size in case there is no batch size sent in the request
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.batch_size)
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
                filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=batch_size,
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import OpenImagesChallengeDataset, OpenImagesDataset
class TestOpenImagesDataset(unittest.TestCase):
def test_init(self):
dataset = OpenImagesDataset(
data_root='tests/data/OpenImages/',
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/image-metas.pkl',
pipeline=[])
dataset.full_init()
self.assertEqual(len(dataset), 1)
self.assertEqual(dataset.metainfo['classes'], ['Airplane'])
class TestOpenImagesChallengeDataset(unittest.TestCase):
def test_init(self):
dataset = OpenImagesChallengeDataset(
data_root='tests/data/OpenImages/',
ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',
data_prefix=dict(img='OpenImages/train/'),
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='annotations/image-metas.pkl',
pipeline=[])
dataset.full_init()
self.assertEqual(len(dataset), 1)
self.assertEqual(dataset.metainfo['classes'], ['Airplane'])
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import OpenImagesChallengeDataset, OpenImagesDataset
class TestOpenImagesDataset(unittest.TestCase):
def test_init(self):
dataset = OpenImagesDataset(
data_root='tests/data/OpenImages/',
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/image-metas.pkl',
pipeline=[])
dataset.full_init()
self.assertEqual(len(dataset), 1)
self.assertEqual(dataset.metainfo['CLASSES'], ['Airplane'])
class TestOpenImagesChallengeDataset(unittest.TestCase):
def test_init(self):
dataset = OpenImagesChallengeDataset(
data_root='tests/data/OpenImages/',
ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',
data_prefix=dict(img='OpenImages/train/'),
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='annotations/image-metas.pkl',
pipeline=[])
dataset.full_init()
self.assertEqual(len(dataset), 1)
self.assertEqual(dataset.metainfo['CLASSES'], ['Airplane'])
|
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
GraphExecutionMeta,
NodeExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_incomplete_node_executions,
get_latest_node_execution,
get_node_execution_results,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
update_node_execution_status,
update_node_execution_status_batch,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(
user_id: str, cost: int, metadata: UsageTransactionMetadata
) -> int:
return await _user_credit_model.spend_credits(user_id, cost, metadata)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.execution_event_bus = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(
self, execution_result: GraphExecutionMeta | NodeExecutionResult
):
self.execution_event_bus.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_node_execution_results = exposed_run_and_wait(get_node_execution_results)
get_incomplete_node_executions = exposed_run_and_wait(
get_incomplete_node_executions
)
get_latest_node_execution = exposed_run_and_wait(get_latest_node_execution)
update_node_execution_status = exposed_run_and_wait(update_node_execution_status)
update_node_execution_status_batch = exposed_run_and_wait(
update_node_execution_status_batch
)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_execution_status_batch,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(
user_id: str, cost: int, metadata: UsageTransactionMetadata
) -> int:
return await _user_credit_model.spend_credits(user_id, cost, metadata)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_execution_status_batch = exposed_run_and_wait(update_execution_status_batch)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
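# --- Illustrative sketch (hypothetical; not the actual backend.util.service code) ---
# A minimal, hedged illustration of the wrapping pattern used throughout
# DatabaseManager above: each async data-layer function is exposed as a
# blocking call by running its coroutine to completion on an event loop.
# The helper name and implementation below are assumptions for illustration only.
def _run_and_wait_sketch(coro_fn):
    import asyncio
    from functools import wraps

    @wraps(coro_fn)
    def wrapper(*args, **kwargs):
        # The real service presumably reuses a long-lived event loop;
        # asyncio.run is used here only to keep the sketch self-contained.
        return asyncio.run(coro_fn(*args, **kwargs))

    return wrapper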
|
"""
Python polyfills for operator
"""
from __future__ import annotations
import operator
from typing import Any, Callable, overload, TYPE_CHECKING, TypeVar
from typing_extensions import TypeVarTuple, Unpack
from ..decorators import substitute_in_graph
if TYPE_CHECKING:
from collections.abc import Iterable
# Most unary and binary operators are handled by BuiltinVariable (e.g., `pos`, `add`)
__all__ = ["attrgetter", "itemgetter", "methodcaller", "countOf"]
_T = TypeVar("_T")
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_Ts = TypeVarTuple("_Ts")
_U = TypeVar("_U")
_U1 = TypeVar("_U1")
_U2 = TypeVar("_U2")
_Us = TypeVarTuple("_Us")
@overload
def attrgetter(attr: str, /) -> Callable[[Any], _U]: ...
@overload
def attrgetter(
attr1: str, attr2: str, /, *attrs: str
) -> Callable[[Any], tuple[_U1, _U2, Unpack[_Us]]]: ...
# Reference: https://docs.python.org/3/library/operator.html#operator.attrgetter
@substitute_in_graph(operator.attrgetter, is_embedded_type=True) # type: ignore[arg-type,misc]
def attrgetter(*attrs: str) -> Callable[[Any], Any | tuple[Any, ...]]:
if len(attrs) == 0:
raise TypeError("attrgetter expected 1 argument, got 0")
if any(not isinstance(attr, str) for attr in attrs):
raise TypeError("attribute name must be a string")
def resolve_attr(obj: Any, attr: str) -> Any:
for name in attr.split("."):
obj = getattr(obj, name)
return obj
if len(attrs) == 1:
attr = attrs[0]
def getter(obj: Any) -> Any:
return resolve_attr(obj, attr)
else:
def getter(obj: Any) -> tuple[Any, ...]: # type: ignore[misc]
return tuple(resolve_attr(obj, attr) for attr in attrs)
return getter
@overload
def itemgetter(item: _T, /) -> Callable[[Any], _U]: ...
@overload
def itemgetter(
item1: _T1, item2: _T2, /, *items: Unpack[_Ts]
) -> Callable[[Any], tuple[_U1, _U2, Unpack[_Us]]]: ...
# Reference: https://docs.python.org/3/library/operator.html#operator.itemgetter
@substitute_in_graph(operator.itemgetter, is_embedded_type=True) # type: ignore[arg-type,misc]
def itemgetter(*items: Any) -> Callable[[Any], Any | tuple[Any, ...]]:
if len(items) == 0:
raise TypeError("itemgetter expected 1 argument, got 0")
if len(items) == 1:
item = items[0]
def getter(obj: Any) -> Any:
return obj[item]
else:
def getter(obj: Any) -> tuple[Any, ...]: # type: ignore[misc]
return tuple(obj[item] for item in items)
return getter
# Reference: https://docs.python.org/3/library/operator.html#operator.methodcaller
@substitute_in_graph(operator.methodcaller, is_embedded_type=True) # type: ignore[arg-type]
def methodcaller(name: str, /, *args: Any, **kwargs: Any) -> Callable[[Any], Any]:
if not isinstance(name, str):
raise TypeError("method name must be a string")
def caller(obj: Any) -> Any:
return getattr(obj, name)(*args, **kwargs)
return caller
# Reference: https://docs.python.org/3/library/operator.html#operator.countOf
@substitute_in_graph(operator.countOf, can_constant_fold_through=True) # type: ignore[arg-type,misc]
def countOf(a: Iterable[_T], b: _T, /) -> int:
return sum(it is b or it == b for it in a)
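# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged check of the semantics these polyfills are meant to
# reproduce, exercised against the stdlib `operator` equivalents so it does
# not depend on Dynamo tracing.
if __name__ == "__main__":
    import operator as _op

    class _Point:
        def __init__(self, x: int, y: int) -> None:
            self.x, self.y = x, y

    p = _Point(1, 2)
    assert _op.attrgetter("x")(p) == 1
    assert _op.attrgetter("x", "y")(p) == (1, 2)
    assert _op.itemgetter(0)([10, 20]) == 10
    assert _op.itemgetter(0, 1)([10, 20]) == (10, 20)
    assert _op.methodcaller("upper")("abc") == "ABC"
    assert _op.countOf([1, 2, 2, 3], 2) == 2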
|
"""
Python polyfills for operator
"""
from __future__ import annotations
import operator
from typing import Any, Callable, overload, TypeVar
from typing_extensions import TypeVarTuple, Unpack
from ..decorators import substitute_in_graph
# Most unary and binary operators are handled by BuiltinVariable (e.g., `pos`, `add`)
__all__ = ["attrgetter", "itemgetter", "methodcaller"]
_T = TypeVar("_T")
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_Ts = TypeVarTuple("_Ts")
_U = TypeVar("_U")
_U1 = TypeVar("_U1")
_U2 = TypeVar("_U2")
_Us = TypeVarTuple("_Us")
@overload
def attrgetter(attr: str, /) -> Callable[[Any], _U]: ...
@overload
def attrgetter(
attr1: str, attr2: str, /, *attrs: str
) -> Callable[[Any], tuple[_U1, _U2, Unpack[_Us]]]: ...
# Reference: https://docs.python.org/3/library/operator.html#operator.attrgetter
@substitute_in_graph(operator.attrgetter, is_embedded_type=True) # type: ignore[arg-type,misc]
def attrgetter(*attrs: str) -> Callable[[Any], Any | tuple[Any, ...]]:
if len(attrs) == 0:
raise TypeError("attrgetter expected 1 argument, got 0")
if any(not isinstance(attr, str) for attr in attrs):
raise TypeError("attribute name must be a string")
def resolve_attr(obj: Any, attr: str) -> Any:
for name in attr.split("."):
obj = getattr(obj, name)
return obj
if len(attrs) == 1:
attr = attrs[0]
def getter(obj: Any) -> Any:
return resolve_attr(obj, attr)
else:
def getter(obj: Any) -> tuple[Any, ...]: # type: ignore[misc]
return tuple(resolve_attr(obj, attr) for attr in attrs)
return getter
@overload
def itemgetter(item: _T, /) -> Callable[[Any], _U]: ...
@overload
def itemgetter(
item1: _T1, item2: _T2, /, *items: Unpack[_Ts]
) -> Callable[[Any], tuple[_U1, _U2, Unpack[_Us]]]: ...
# Reference: https://docs.python.org/3/library/operator.html#operator.itemgetter
@substitute_in_graph(operator.itemgetter, is_embedded_type=True) # type: ignore[arg-type,misc]
def itemgetter(*items: Any) -> Callable[[Any], Any | tuple[Any, ...]]:
if len(items) == 0:
raise TypeError("itemgetter expected 1 argument, got 0")
if len(items) == 1:
item = items[0]
def getter(obj: Any) -> Any:
return obj[item]
else:
def getter(obj: Any) -> tuple[Any, ...]: # type: ignore[misc]
return tuple(obj[item] for item in items)
return getter
# Reference: https://docs.python.org/3/library/operator.html#operator.methodcaller
@substitute_in_graph(operator.methodcaller, is_embedded_type=True) # type: ignore[arg-type]
def methodcaller(name: str, /, *args: Any, **kwargs: Any) -> Callable[[Any], Any]:
if not isinstance(name, str):
raise TypeError("method name must be a string")
def caller(obj: Any) -> Any:
return getattr(obj, name)(*args, **kwargs)
return caller
|
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""
The metric for the contrastive loss
"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
:param model: SentenceTransformer model
        :param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
:param size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('all-MiniLM-L6-v2')
train_examples = [
InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.ContrastiveLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Qdrant
from langchain_community.vectorstores.qdrant import QdrantException
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"QdrantException": "langchain_community.vectorstores.qdrant",
"Qdrant": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Qdrant",
"QdrantException",
]
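# --- Illustrative sketch (hypothetical; not langchain's actual `create_importer`) ---
# A minimal, hedged example of the PEP 562 pattern that the dynamic lookup
# above builds on: a module-level `__getattr__` that resolves a deprecated
# name from its new location and warns. Kept as comments so it does not
# shadow the real `__getattr__` defined above.
#
#     import importlib
#     import warnings
#
#     _LOOKUP = {"Qdrant": "langchain_community.vectorstores"}
#
#     def __getattr__(name):
#         if name in _LOOKUP:
#             warnings.warn(
#                 f"Importing {name} from this module is deprecated; "
#                 f"import it from {_LOOKUP[name]} instead.",
#                 DeprecationWarning,
#                 stacklevel=2,
#             )
#             return getattr(importlib.import_module(_LOOKUP[name]), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")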
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Qdrant
from langchain_community.vectorstores.qdrant import QdrantException
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"QdrantException": "langchain_community.vectorstores.qdrant",
"Qdrant": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"QdrantException",
"Qdrant",
]
|
_base_ = './htc_hrnetv2p-w40_20e_coco.py'
# learning policy
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[24, 27],
gamma=0.1)
]
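# The schedule above warms the learning rate up linearly over the first 500
# iterations, then decays it by a factor of 10 at epochs 24 and 27 of the
# 28-epoch run.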
|
_base_ = './htc_hrnetv2p_w40_20e_coco.py'
# learning policy
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[24, 27],
gamma=0.1)
]
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine'
]
|
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine'
]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""# Test that building using op_allowlist works with ops with namespaces."""
from tensorflow.python.framework import test_namespace_ops
from tensorflow.python.platform import googletest
class OpAllowlistNamespaceTest(googletest.TestCase):
def testOpAllowListNamespace(self):
"""Test that the building of the python wrapper worked."""
op = test_namespace_ops.namespace_test_string_output
self.assertIsNotNone(op)
if __name__ == "__main__":
googletest.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""# Test that buidling using op_allowlist works with ops with namespaces."""
from tensorflow.python.framework import test_namespace_ops
from tensorflow.python.platform import googletest
class OpAllowlistNamespaceTest(googletest.TestCase):
def testOpAllowListNamespace(self):
"""Test that the building of the python wrapper worked."""
op = test_namespace_ops.namespace_test_string_output
self.assertIsNotNone(op)
if __name__ == "__main__":
googletest.main()
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
        CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
:param model: SentenceTransformer model
:param loss_fct: Which pytorch loss function should be used to compare the ``cosine_similarity(u, v)`` with the input_label?
By default, MSE is used: ``||input_label - cosine_sim(u, v)||_2``
:param cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity.
            By default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)
]
train_batch_size = 1
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.view(-1))
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
        CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
        vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
:param model: SentenceTransformer model
:param loss_fct: Which pytorch loss function should be used to compare the ``cosine_similarity(u, v)`` with the input_label?
By default, MSE is used: ``||input_label - cosine_sim(u, v)||_2``
:param cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity.
            By default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)
]
train_batch_size = 1
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.view(-1))
|
"""Helper script for triggering Read the docs build.
See `doc/contrib/docs.rst <https://xgboost.readthedocs.io/en/stable/contrib/docs.html>`__
for more info.
"""
import json
import os
import pprint
from http.client import responses as http_responses
import requests # type: ignore
def trigger_build(token: str) -> None:
"""Trigger RTD build."""
event_path = os.environ["GITHUB_EVENT_PATH"]
with open(event_path, "r") as fd:
event: dict = json.load(fd)
if event.get("pull_request", None) is None:
# refs/heads/branch-name
branch = event["ref"].split("/")[-1]
else:
branch = event["pull_request"]["number"]
if branch == "master":
# Use the `latest` tag, otherwise RTD wouldn't update the rendered doc.
branch = "latest"
URL = f"https://readthedocs.org/api/v3/projects/xgboost/versions/{branch}/builds/"
HEADERS = {"Authorization": f"token {token}"}
response = requests.post(URL, headers=HEADERS)
# 202 means the build is successfully triggered.
if response.status_code != 202:
status_text = http_responses[response.status_code]
raise RuntimeError(
"ReadTheDocs returned an unexpected response: "
f"{response.status_code} {status_text}, reason: {response.reason}"
)
pprint.pprint(response.json(), indent=4)
def main() -> None:
token = os.getenv("RTD_AUTH_TOKEN")
# GA redacts the secret by default, but we should still be really careful to not log
# (expose) the token in the CI.
if token is None:
raise RuntimeError(
"The RTD_AUTH_TOKEN environment variable must be set to a valid auth token for the"
"ReadTheDocs service."
)
if len(token) == 0:
print("Document build is not triggered.")
return
if not isinstance(token, str) or len(token) != 40:
raise ValueError(f"Invalid token.")
trigger_build(token)
if __name__ == "__main__":
main()
|
"""Helper script for triggering Read the docs build.
See `doc/contrib/docs.rst <https://xgboost.readthedocs.io/en/stable/contrib/docs.html>`__
for more info.
"""
import json
import os
import pprint
from http.client import responses as http_responses
import requests # type: ignore
def trigger_build(token: str) -> None:
"""Trigger RTD build."""
event_path = os.environ["GITHUB_EVENT_PATH"]
with open(event_path, "r") as fd:
event: dict = json.load(fd)
if event.get("pull_request", None) is None:
# refs/heads/branch-name
branch = event["ref"].split("/")[-1]
else:
branch = event["pull_request"]["number"]
URL = f"https://readthedocs.org/api/v3/projects/xgboost/versions/{branch}/builds/"
HEADERS = {"Authorization": f"token {token}"}
response = requests.post(URL, headers=HEADERS)
# 202 means the build is successfully triggered.
if response.status_code != 202:
status_text = http_responses[response.status_code]
raise RuntimeError(
"ReadTheDocs returned an unexpected response: "
f"{response.status_code} {status_text}, reason: {response.reason}"
)
pprint.pprint(response.json(), indent=4)
def main() -> None:
token = os.getenv("RTD_AUTH_TOKEN")
# GA redacts the secret by default, but we should still be really careful to not log
# (expose) the token in the CI.
if token is None:
raise RuntimeError(
"The RTD_AUTH_TOKEN environment variable must be set to a valid auth token for the"
"ReadTheDocs service."
)
if len(token) == 0:
print("Document build is not triggered.")
return
if not isinstance(token, str) or len(token) != 40:
raise ValueError(f"Invalid token.")
trigger_build(token)
if __name__ == "__main__":
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
Defaults to 56.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
finest_scale: int = 56,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
roi_layer=roi_layer,
out_channels=out_channels,
featmap_strides=featmap_strides,
init_cfg=init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor:
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None):
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
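# --- Illustrative numeric sketch (not part of the original module) ---
# A minimal, hedged check of the level-mapping rule in `map_roi_levels` above,
# assuming the default finest_scale=56 and a 4-level feature pyramid: an RoI of
# scale s is assigned floor(log2(s / 56 + 1e-6)), clamped to [0, 3].
if __name__ == "__main__":
    finest_scale, num_levels = 56, 4
    # RoIs in (batch_idx, x1, y1, x2, y2) format with side lengths 32, 112, 448.
    rois = torch.tensor([
        [0., 0., 0., 32., 32.],
        [0., 0., 0., 112., 112.],
        [0., 0., 0., 448., 448.],
    ])
    scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
    lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
    print(lvls.clamp(min=0, max=num_levels - 1).long())  # tensor([0, 1, 3])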
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.core.utils.typing import ConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
Defaults to 56.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
finest_scale: int = 56,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
roi_layer=roi_layer,
out_channels=out_channels,
featmap_strides=featmap_strides,
init_cfg=init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor:
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None):
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
|
import csv
import logging
import os
from typing import List
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with 2 or more outputs. It measures the
    accuracy of the predicted class vs. the gold labels.
"""
def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
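# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of wiring this evaluator to a CrossEncoder during
# evaluation. The checkpoint path below is a hypothetical placeholder.
if __name__ == "__main__":
    from sentence_transformers import CrossEncoder

    dev_samples = [
        InputExample(texts=["A man is eating food.", "A man is eating a meal."], label=1),
        InputExample(texts=["A man is eating food.", "The girl is carrying a baby."], label=0),
    ]
    evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name="dev")
    model = CrossEncoder("path/to/your-binary-cross-encoder", num_labels=2)  # hypothetical checkpoint
    accuracy = evaluator(model, output_path=None)
    print(f"Accuracy: {accuracy:.4f}")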
|
import logging
import os
import csv
from typing import List
from ... import InputExample
import numpy as np
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with 2 or more outputs. It measures the
    accuracy of the predicted class vs. the gold labels.
"""
def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str='', write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else '') + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc*100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
|
# Owner(s): ["module: dynamo"]
import unittest
from torch._dynamo import config
from torch._dynamo.testing import make_test_cls_with_patches
try:
from . import test_export
except ImportError:
import test_export
test_classes = {}
def make_dynamic_cls(cls):
suffix = "_inline_and_install"
cls_prefix = "InlineAndInstall"
test_class = make_test_cls_with_patches(
cls,
cls_prefix,
suffix,
(config, "install_free_tensors", True),
(config, "inline_inbuilt_nn_modules", True),
xfail_prop="_expected_failure_inline_and_install",
)
test_classes[test_class.__name__] = test_class
# REMOVING THIS LINE WILL STOP TESTS FROM RUNNING
globals()[test_class.__name__] = test_class
test_class.__module__ = __name__
return test_class
tests = [
test_export.ExportTests,
]
for test in tests:
make_dynamic_cls(test)
del test
# After installing and inlining is turned on, these tests won't throw
# errors in export (which is expected for the test to pass).
# Therefore, these unit tests are expected to fail, and we need to update the
# semantics.
unittest.expectedFailure(
InlineAndInstallExportTests.test_invalid_input_global_inline_and_install # noqa: F821
)
unittest.expectedFailure(
InlineAndInstallExportTests.test_invalid_input_global_multiple_access_inline_and_install # noqa: F821
)
unittest.expectedFailure(
InlineAndInstallExportTests.test_invalid_input_nonlocal_inline_and_install # noqa: F821
)
# These tests do string comparison on the graphs, and since buffers are now inlined, they
# are named differently, resulting in failure
unittest.expectedFailure(
InlineAndInstallExportTests.test_param_buffer_safe_from_mutation_simple_inline_and_install # noqa: F821
)
# This particular test is marked expecting failure, since dynamo was creating second param for a
# and this was causing a failure in the sum; however with these changes, that test is fixed
# so will now pass, so we need to mark that it is no longer expected to fail
def expectedSuccess(test_item):
test_item.__unittest_expecting_failure__ = False
return test_item
expectedSuccess(
InlineAndInstallExportTests.test_sum_param_inline_and_install # noqa: F821
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
# Owner(s): ["module: dynamo"]
import unittest
from torch._dynamo import config
from torch._dynamo.testing import make_test_cls_with_patches
try:
from . import test_export
except ImportError:
import test_export
test_classes = {}
def make_dynamic_cls(cls):
suffix = "_inline_and_install"
cls_prefix = "InlineAndInstall"
test_class = make_test_cls_with_patches(
cls,
cls_prefix,
suffix,
(config, "install_free_tensors", True),
(config, "inline_inbuilt_nn_modules", True),
xfail_prop="_expected_failure_inline_and_install",
)
test_classes[test_class.__name__] = test_class
# REMOVING THIS LINE WILL STOP TESTS FROM RUNNING
globals()[test_class.__name__] = test_class
test_class.__module__ = __name__
return test_class
tests = [
test_export.ExportTests,
]
for test in tests:
make_dynamic_cls(test)
del test
# After installing and inlining is turned on, these tests won't throw
# errors in export (which is expected for the test to pass).
# Therefore, these unit tests are expected to fail, and we need to update the
# semantics.
unittest.expectedFailure(
InlineAndInstallExportTests.test_invalid_input_global_inline_and_install # noqa: F821
)
unittest.expectedFailure(
InlineAndInstallExportTests.test_invalid_input_global_multiple_access_inline_and_install # noqa: F821
)
unittest.expectedFailure(
InlineAndInstallExportTests.test_invalid_input_nonlocal_inline_and_install # noqa: F821
)
# These tests do string comparison on the graphs, and since buffers are now inlined, they
# are named differently, resulting in failure
unittest.expectedFailure(
InlineAndInstallExportTests.test_param_buffer_safe_from_mutation_simple_inline_and_install # noqa: F821
)
# This particular test is marked expecting failure, since dynamo was creating second param for a
# and this was causing a failure in the sum; however with these changes, that test is fixed
# so will now pass, so we need to mark that it is no longer expected to fail
def expectedSuccess(test_item):
test_item.__unittest_expecting_failure__ = False
return test_item
expectedSuccess(
InlineAndInstallExportTests.test_sum_param_inline_and_install # noqa: F821
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
"""Retrieve query."""
import logging
from typing import Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.indices.tree.base import TreeIndex
from llama_index.core.indices.utils import get_sorted_node_list
from llama_index.core.schema import NodeWithScore, QueryBundle
logger = logging.getLogger(__name__)
class TreeRootRetriever(BaseRetriever):
"""
Tree root retriever.
This class directly retrieves the answer from the root nodes.
Unlike GPTTreeIndexLeafQuery, this class assumes the graph already stores
the answer (because it was constructed with a query_str), so it does not
attempt to parse information down the graph in order to synthesize an answer.
"""
def __init__(
self,
index: TreeIndex,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
self._index = index
self._index_struct = index.index_struct
self._docstore = index.docstore
super().__init__(
callback_manager=callback_manager, object_map=object_map, verbose=verbose
)
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Get nodes for response."""
logger.info(f"> Starting query: {query_bundle.query_str}")
root_nodes = self._docstore.get_node_dict(self._index_struct.root_nodes)
sorted_nodes = get_sorted_node_list(root_nodes)
return [NodeWithScore(node=node) for node in sorted_nodes]
|
"""Retrieve query."""
import logging
from typing import Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.indices.tree.base import TreeIndex
from llama_index.core.indices.utils import get_sorted_node_list
from llama_index.core.schema import NodeWithScore, QueryBundle
logger = logging.getLogger(__name__)
class TreeRootRetriever(BaseRetriever):
"""
Tree root retriever.
This class directly retrieves the answer from the root nodes.
Unlike GPTTreeIndexLeafQuery, this class assumes the graph already stores
the answer (because it was constructed with a query_str), so it does not
attempt to parse information down the graph in order to synthesize an answer.
"""
def __init__(
self,
index: TreeIndex,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
self._index = index
self._index_struct = index.index_struct
self._docstore = index.docstore
super().__init__(
callback_manager=callback_manager, object_map=object_map, verbose=verbose
)
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Get nodes for response."""
logger.info(f"> Starting query: {query_bundle.query_str}")
root_nodes = self._docstore.get_node_dict(self._index_struct.root_nodes)
sorted_nodes = get_sorted_node_list(root_nodes)
return [NodeWithScore(node=node) for node in sorted_nodes]
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast; clustering 50k sentences requires only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
import csv
import os
import time
from sentence_transformers import SentenceTransformer, util
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_community_size: Only consider clusters that have at least 25 elements
# threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print(f"Clustering done after {time.time() - start_time:.2f} sec")
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print(f"\nCluster {i + 1}, #{len(cluster)} Elements ")
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast; clustering 50k sentences requires only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
import csv
import os
import time
from sentence_transformers import SentenceTransformer, util
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_community_size: Only consider clusters that have at least 25 elements
# threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i + 1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# The SparseMSEEvaluator computes the mean squared error between the teacher embeddings of the source sentences and the student embeddings of the target sentences (lower is better).
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseMSEEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# The SparseMSEEvaluator computes the mean squared error between the teacher embeddings of the source sentences and the student embeddings of the target sentences (lower is better).
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner'
]
|
from typing import Any, Optional
from typing_extensions import override
from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
from langchain_core.globals import set_llm_cache
from langchain_core.language_models import FakeListLLM
class InMemoryCache(BaseCache):
"""In-memory cache used for testing purposes."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
self._cache[prompt, llm_string] = return_val
@override
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
async def test_local_cache_generate_async() -> None:
global_cache = InMemoryCache()
local_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
assert global_cache._cache == {}
assert len(local_cache._cache) == 1
finally:
set_llm_cache(None)
def test_local_cache_generate_sync() -> None:
global_cache = InMemoryCache()
local_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
assert global_cache._cache == {}
assert len(local_cache._cache) == 1
finally:
set_llm_cache(None)
class InMemoryCacheBad(BaseCache):
"""In-memory cache used for testing purposes."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
msg = "This code should not be triggered"
raise NotImplementedError(msg)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
msg = "This code should not be triggered"
raise NotImplementedError(msg)
@override
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
def test_no_cache_generate_sync() -> None:
global_cache = InMemoryCacheBad()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=False, responses=["foo", "bar"])
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
output = llm.generate(["foo"])
assert output.generations[0][0].text == "bar"
assert global_cache._cache == {}
finally:
set_llm_cache(None)
async def test_no_cache_generate_async() -> None:
global_cache = InMemoryCacheBad()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=False, responses=["foo", "bar"])
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "bar"
assert global_cache._cache == {}
finally:
set_llm_cache(None)
|
from typing import Any, Optional
from typing_extensions import override
from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
from langchain_core.globals import set_llm_cache
from langchain_core.language_models import FakeListLLM
class InMemoryCache(BaseCache):
"""In-memory cache used for testing purposes."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
self._cache[(prompt, llm_string)] = return_val
@override
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
async def test_local_cache_generate_async() -> None:
global_cache = InMemoryCache()
local_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
assert global_cache._cache == {}
assert len(local_cache._cache) == 1
finally:
set_llm_cache(None)
def test_local_cache_generate_sync() -> None:
global_cache = InMemoryCache()
local_cache = InMemoryCache()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
assert global_cache._cache == {}
assert len(local_cache._cache) == 1
finally:
set_llm_cache(None)
class InMemoryCacheBad(BaseCache):
"""In-memory cache used for testing purposes."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
msg = "This code should not be triggered"
raise NotImplementedError(msg)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
msg = "This code should not be triggered"
raise NotImplementedError(msg)
@override
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
def test_no_cache_generate_sync() -> None:
global_cache = InMemoryCacheBad()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=False, responses=["foo", "bar"])
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
output = llm.generate(["foo"])
assert output.generations[0][0].text == "bar"
assert global_cache._cache == {}
finally:
set_llm_cache(None)
async def test_no_cache_generate_async() -> None:
global_cache = InMemoryCacheBad()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=False, responses=["foo", "bar"])
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "bar"
assert global_cache._cache == {}
finally:
set_llm_cache(None)
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.core import url_to_fs
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = url_to_fs(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
def test_fs_overwrites():
protocol = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol, None, clobber=True)
with pytest.warns(UserWarning) as warning_info:
importlib.reload(datasets.filesystems)
assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
def test_fs_overwrites():
protocol = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol, None, clobber=True)
with pytest.warns(UserWarning) as warning_info:
importlib.reload(datasets.filesystems)
assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
|
from typing import Any
def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str:
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
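    Example (illustrative; assumes exactly one non-memory input key remains):
        >>> get_prompt_input_key({"question": "hi", "history": "", "stop": ["\n"]}, ["history"])
        'question'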
"""
# "stop" is a special key that can be passed as input but is not used to
# format the prompt.
prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
if len(prompt_input_keys) != 1:
msg = f"One input key expected got {prompt_input_keys}"
raise ValueError(msg)
return prompt_input_keys[0]
|
from typing import Any
def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str:
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
"""
# "stop" is a special key that can be passed as input but is not used to
# format the prompt.
prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
if len(prompt_input_keys) != 1:
raise ValueError(f"One input key expected got {prompt_input_keys}")
return prompt_input_keys[0]
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
Hook = IterTimerHook()
Runner = Mock()
Hook._before_epoch(Runner)
assert isinstance(Hook.t, float)
def test_before_iter(self):
Hook = IterTimerHook()
Runner = Mock()
Runner.log_buffer = dict()
Hook._before_epoch(Runner)
Hook._before_iter(Runner)
Runner.message_hub.update_log.assert_called()
def test_after_iter(self):
Hook = IterTimerHook()
Runner = Mock()
Runner.log_buffer = dict()
Hook._before_epoch(Runner)
Hook._after_iter(Runner)
Runner.message_hub.update_log.assert_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
Hook = IterTimerHook()
Runner = Mock()
Hook.before_epoch(Runner)
assert isinstance(Hook.t, float)
def test_before_iter(self):
Hook = IterTimerHook()
Runner = Mock()
Runner.log_buffer = dict()
Hook.before_epoch(Runner)
Hook.before_iter(Runner)
assert 'data_time' in Runner.log_buffer
def test_after_iter(self):
Hook = IterTimerHook()
Runner = Mock()
Runner.log_buffer = dict()
Hook.before_epoch(Runner)
Hook.after_iter(Runner)
assert 'time' in Runner.log_buffer
|
from datetime import datetime
from typing import Any, List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from .helpers import ContentSettings
class ExaFindSimilarBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
url: str = SchemaField(
description="The url for which you would like to find similar links"
)
number_of_results: int = SchemaField(
description="Number of results to return",
default=10,
advanced=True,
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default_factory=list,
advanced=True,
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default_factory=list,
advanced=True,
)
start_crawl_date: datetime = SchemaField(
description="Start date for crawled content",
)
end_crawl_date: datetime = SchemaField(
description="End date for crawled content",
)
start_published_date: datetime = SchemaField(
description="Start date for published content",
)
end_published_date: datetime = SchemaField(
description="End date for published content",
)
include_text: List[str] = SchemaField(
description="Text patterns to include (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
contents: ContentSettings = SchemaField(
description="Content retrieval settings",
default=ContentSettings(),
advanced=True,
)
class Output(BlockSchema):
results: List[Any] = SchemaField(
description="List of similar documents with title, URL, published date, author, and score",
default_factory=list,
)
def __init__(self):
super().__init__(
id="5e7315d1-af61-4a0c-9350-7c868fa7438a",
description="Finds similar links using Exa's findSimilar API",
categories={BlockCategory.SEARCH},
input_schema=ExaFindSimilarBlock.Input,
output_schema=ExaFindSimilarBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/findSimilar"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"url": input_data.url,
"numResults": input_data.number_of_results,
"contents": input_data.contents.dict(),
}
optional_field_mapping = {
"include_domains": "includeDomains",
"exclude_domains": "excludeDomains",
"include_text": "includeText",
"exclude_text": "excludeText",
}
# Add optional fields if they have values
for input_field, api_field in optional_field_mapping.items():
value = getattr(input_data, input_field)
if value: # Only add non-empty values
payload[api_field] = value
date_field_mapping = {
"start_crawl_date": "startCrawlDate",
"end_crawl_date": "endCrawlDate",
"start_published_date": "startPublishedDate",
"end_published_date": "endPublishedDate",
}
# Add dates if they exist
for input_field, api_field in date_field_mapping.items():
value = getattr(input_data, input_field, None)
if value:
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
from datetime import datetime
from typing import Any, List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from .helpers import ContentSettings
class ExaFindSimilarBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
url: str = SchemaField(
description="The url for which you would like to find similar links"
)
number_of_results: int = SchemaField(
description="Number of results to return",
default=10,
advanced=True,
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default=[],
advanced=True,
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default=[],
advanced=True,
)
start_crawl_date: datetime = SchemaField(
description="Start date for crawled content",
)
end_crawl_date: datetime = SchemaField(
description="End date for crawled content",
)
start_published_date: datetime = SchemaField(
description="Start date for published content",
)
end_published_date: datetime = SchemaField(
description="End date for published content",
)
include_text: List[str] = SchemaField(
description="Text patterns to include (max 1 string, up to 5 words)",
default=[],
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude (max 1 string, up to 5 words)",
default=[],
advanced=True,
)
contents: ContentSettings = SchemaField(
description="Content retrieval settings",
default=ContentSettings(),
advanced=True,
)
class Output(BlockSchema):
results: List[Any] = SchemaField(
description="List of similar documents with title, URL, published date, author, and score",
default=[],
)
def __init__(self):
super().__init__(
id="5e7315d1-af61-4a0c-9350-7c868fa7438a",
description="Finds similar links using Exa's findSimilar API",
categories={BlockCategory.SEARCH},
input_schema=ExaFindSimilarBlock.Input,
output_schema=ExaFindSimilarBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/findSimilar"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"url": input_data.url,
"numResults": input_data.number_of_results,
"contents": input_data.contents.dict(),
}
optional_field_mapping = {
"include_domains": "includeDomains",
"exclude_domains": "excludeDomains",
"include_text": "includeText",
"exclude_text": "excludeText",
}
# Add optional fields if they have values
for input_field, api_field in optional_field_mapping.items():
value = getattr(input_data, input_field)
if value: # Only add non-empty values
payload[api_field] = value
date_field_mapping = {
"start_crawl_date": "startCrawlDate",
"end_crawl_date": "endCrawlDate",
"start_published_date": "startPublishedDate",
"end_published_date": "endPublishedDate",
}
# Add dates if they exist
for input_field, api_field in date_field_mapping.items():
value = getattr(input_data, input_field, None)
if value:
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, batch_inputs, labels, mode='tensor'):
labels = torch.stack(labels)
outputs = self.linear(batch_inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import DATASETS
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, batch_inputs, labels, mode='tensor'):
labels = torch.stack(labels)
outputs = self.linear(batch_inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module()
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
|
import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
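    # Illustrative note: the graph description routes requests from the gateway through a single
    # worker pod ("pod0"), and `pod_addresses` points that pod at the locally started worker runtime.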
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
ProcessExecutor,
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|