input
stringlengths 33
5k
| output
stringlengths 32
5k
|
---|---|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.runnables.base import (
Runnable,
RunnableBinding,
RunnableGenerator,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_config_list,
patch_config,
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
AddableDict,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
aadd,
add,
)
__all__ = (
"AddableDict",
"ConfigurableField",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSingleOption",
"ConfigurableFieldSpec",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableAssign",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnablePick",
"RunnableSequence",
"RunnableSerializable",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"aadd",
"add",
"chain",
"ensure_config",
"get_config_list",
"patch_config",
"run_in_executor",
)
_dynamic_imports = {
"chain": "base",
"Runnable": "base",
"RunnableBinding": "base",
"RunnableGenerator": "base",
"RunnableLambda": "base",
"RunnableMap": "base",
"RunnableParallel": "base",
"RunnableSequence": "base",
"RunnableSerializable": "base",
"RunnableBranch": "branch",
"RunnableConfig": "config",
"ensure_config": "config",
"get_config_list": "config",
"patch_config": "config",
"run_in_executor": "config",
"RunnableWithFallbacks": "fallbacks",
"RunnableWithMessageHistory": "history",
"RunnableAssign": "passthrough",
"RunnablePassthrough": "passthrough",
"RunnablePick": "passthrough",
"RouterInput": "router",
"RouterRunnable": "router",
"AddableDict": "utils",
"ConfigurableField": "utils",
"ConfigurableFieldMultiOption": "utils",
"ConfigurableFieldSingleOption": "utils",
"ConfigurableFieldSpec": "utils",
"aadd": "utils",
"add": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.runnables.base import (
Runnable,
RunnableBinding,
RunnableGenerator,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_config_list,
patch_config,
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
AddableDict,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
aadd,
add,
)
__all__ = (
"chain",
"AddableDict",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSpec",
"ensure_config",
"run_in_executor",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableAssign",
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"get_config_list",
"aadd",
"add",
)
_dynamic_imports = {
"chain": "base",
"Runnable": "base",
"RunnableBinding": "base",
"RunnableGenerator": "base",
"RunnableLambda": "base",
"RunnableMap": "base",
"RunnableParallel": "base",
"RunnableSequence": "base",
"RunnableSerializable": "base",
"RunnableBranch": "branch",
"RunnableConfig": "config",
"ensure_config": "config",
"get_config_list": "config",
"patch_config": "config",
"run_in_executor": "config",
"RunnableWithFallbacks": "fallbacks",
"RunnableWithMessageHistory": "history",
"RunnableAssign": "passthrough",
"RunnablePassthrough": "passthrough",
"RunnablePick": "passthrough",
"RouterInput": "router",
"RouterRunnable": "router",
"AddableDict": "utils",
"ConfigurableField": "utils",
"ConfigurableFieldMultiOption": "utils",
"ConfigurableFieldSingleOption": "utils",
"ConfigurableFieldSpec": "utils",
"aadd": "utils",
"add": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
import scipy
from jina import Document, DocumentArray, Executor
from jina.excepts import ExecutorFailToLoad
from tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.fixture(scope='session')
def basic_encoder() -> TFIDFTextEncoder:
return TFIDFTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'TFIDFTextEncoder'
def test_error_no_file():
with pytest.raises(ExecutorFailToLoad):
TFIDFTextEncoder(path_vectorizer='does/not/exist')
def test_no_document(basic_encoder: TFIDFTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_tfidf_text_encoder(basic_encoder: TFIDFTextEncoder):
doc = Document(text='Han likes eating pizza')
docarray = DocumentArray([doc])
basic_encoder.encode(docarray, parameters={})
embedding = doc.embedding
assert embedding.shape == (1, _EMBEDDING_DIM)
assert embedding.size == 4
def test_tfidf_text_encoder_batch(basic_encoder: TFIDFTextEncoder):
# Input
text_batch = ['Han likes eating pizza', 'Han likes pizza', 'Jina rocks']
# Encoder embedding
docarray = DocumentArray([Document(text=text) for text in text_batch])
basic_encoder.encode(docarray, parameters={})
embeddeding_batch = scipy.sparse.vstack(docarray.get_attributes('embedding'))
assert embeddeding_batch.shape == (3, _EMBEDDING_DIM)
assert embeddeding_batch.size == 8
embs = np.asarray(embeddeding_batch.todense())
# They overlap in Han
assert (embs[0] * embs[1]).sum() > 0.1
# They do not overlap
assert (embs[0] * embs[2]).sum() == 0
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: TFIDFTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: TFIDFTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (1, _EMBEDDING_DIM)
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
import scipy
from jina import Document, DocumentArray, Executor
from jina.excepts import PretrainedModelFileDoesNotExist
from tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.fixture(scope='session')
def basic_encoder() -> TFIDFTextEncoder:
return TFIDFTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'TFIDFTextEncoder'
def test_error_no_file():
with pytest.raises(PretrainedModelFileDoesNotExist):
TFIDFTextEncoder(path_vectorizer='does/not/exist')
def test_no_document(basic_encoder: TFIDFTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_tfidf_text_encoder(basic_encoder: TFIDFTextEncoder):
doc = Document(text='Han likes eating pizza')
docarray = DocumentArray([doc])
basic_encoder.encode(docarray, parameters={})
embedding = doc.embedding
assert embedding.shape == (1, _EMBEDDING_DIM)
assert embedding.size == 4
def test_tfidf_text_encoder_batch(basic_encoder: TFIDFTextEncoder):
# Input
text_batch = ['Han likes eating pizza', 'Han likes pizza', 'Jina rocks']
# Encoder embedding
docarray = DocumentArray([Document(text=text) for text in text_batch])
basic_encoder.encode(docarray, parameters={})
embeddeding_batch = scipy.sparse.vstack(docarray.get_attributes('embedding'))
assert embeddeding_batch.shape == (3, _EMBEDDING_DIM)
assert embeddeding_batch.size == 8
embs = np.asarray(embeddeding_batch.todense())
# They overlap in Han
assert (embs[0] * embs[1]).sum() > 0.1
# They do not overlap
assert (embs[0] * embs[2]).sum() == 0
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: TFIDFTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: TFIDFTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (1, _EMBEDDING_DIM)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
assert pred.size() == target.size() and target.numel() > 0
loss = torch.abs(pred - target)
return loss
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
|
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
assert pred.size() == target.size() and target.numel() > 0
loss = torch.abs(pred - target)
return loss
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
|
import copy as cp
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from docarray.dataclasses import is_multimodal
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import T
@lru_cache()
def _get_fields(dc):
return [f.name for f in fields(dc)]
class BaseDCType:
_data_class = None
def __init__(
self: 'T',
_obj: Optional['T'] = None,
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
**kwargs,
):
self._data = None
if isinstance(_obj, type(self)):
if copy:
self.copy_from(_obj)
else:
self._data = _obj._data
elif isinstance(_obj, dict):
kwargs.update(_obj)
elif is_multimodal(_obj):
self._data = type(self)._from_dataclass(_obj)._data
if kwargs:
try:
self._data = self._data_class(self, **kwargs)
except TypeError as ex:
if unknown_fields_handler == 'raise':
raise AttributeError(f'unknown attributes') from ex
else:
if field_resolver:
kwargs = {
field_resolver.get(k, k): v for k, v in kwargs.items()
}
_fields = _get_fields(self._data_class)
_unknown_kwargs = None
_unresolved = set(kwargs.keys()).difference(_fields)
if _unresolved:
_unknown_kwargs = {k: kwargs[k] for k in _unresolved}
for k in _unresolved:
kwargs.pop(k)
self._data = self._data_class(self, **kwargs)
if _unknown_kwargs and unknown_fields_handler == 'catch':
getattr(self, self._unresolved_fields_dest).update(
_unknown_kwargs
)
for k in self._post_init_fields:
if k in kwargs:
setattr(self, k, kwargs[k])
if not _obj and not kwargs and self._data is None:
self._data = self._data_class(self)
if self._data is None:
raise ValueError(
f'Failed to initialize {typename(self)} from obj={_obj}, kwargs={kwargs}'
)
def copy_from(self: 'T', other: 'T') -> None:
"""Overwrite self by copying from another :class:`Document`.
:param other: the other Document to copy from
"""
self._data = cp.deepcopy(other._data)
def clear(self) -> None:
"""Clear all fields from this :class:`Document` to their default values."""
for f in self.non_empty_fields:
setattr(self._data, f, None)
def pop(self, *fields) -> None:
"""Clear some fields from this :class:`Document` to their default values.
:param fields: field names to clear.
"""
for f in fields:
if hasattr(self, f):
setattr(self._data, f, None)
@property
def non_empty_fields(self) -> Tuple[str]:
"""Get all non-emtpy fields of this :class:`Document`.
Non-empty fields are the fields with not-`None` and not-default values.
:return: field names in a tuple.
"""
return self._data._non_empty_fields
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __hash__(self):
return hash(self._data)
def __repr__(self):
content = str(self.non_empty_fields)
content += f' at {getattr(self, "id", id(self))}'
return f'<{self.__class__.__name__} {content.strip()}>'
def __bytes__(self):
return self.to_bytes()
def __eq__(self, other):
if type(self) is type(other):
return self._data == other._data
return False
|
import copy as cp
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from .dataclasses import is_multimodal
from .helper import typename
if TYPE_CHECKING:
from .typing import T
@lru_cache()
def _get_fields(dc):
return [f.name for f in fields(dc)]
class BaseDCType:
_data_class = None
def __init__(
self: 'T',
_obj: Optional['T'] = None,
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
**kwargs,
):
self._data = None
if isinstance(_obj, type(self)):
if copy:
self.copy_from(_obj)
else:
self._data = _obj._data
elif isinstance(_obj, dict):
kwargs.update(_obj)
elif is_multimodal(_obj):
self._data = type(self)._from_dataclass(_obj)._data
if kwargs:
try:
self._data = self._data_class(self, **kwargs)
except TypeError as ex:
if unknown_fields_handler == 'raise':
raise AttributeError(f'unknown attributes') from ex
else:
if field_resolver:
kwargs = {
field_resolver.get(k, k): v for k, v in kwargs.items()
}
_fields = _get_fields(self._data_class)
_unknown_kwargs = None
_unresolved = set(kwargs.keys()).difference(_fields)
if _unresolved:
_unknown_kwargs = {k: kwargs[k] for k in _unresolved}
for k in _unresolved:
kwargs.pop(k)
self._data = self._data_class(self, **kwargs)
if _unknown_kwargs and unknown_fields_handler == 'catch':
getattr(self, self._unresolved_fields_dest).update(
_unknown_kwargs
)
for k in self._post_init_fields:
if k in kwargs:
setattr(self, k, kwargs[k])
if not _obj and not kwargs and self._data is None:
self._data = self._data_class(self)
if self._data is None:
raise ValueError(
f'Failed to initialize {typename(self)} from obj={_obj}, kwargs={kwargs}'
)
def copy_from(self: 'T', other: 'T') -> None:
"""Overwrite self by copying from another :class:`Document`.
:param other: the other Document to copy from
"""
self._data = cp.deepcopy(other._data)
def clear(self) -> None:
"""Clear all fields from this :class:`Document` to their default values."""
for f in self.non_empty_fields:
setattr(self._data, f, None)
def pop(self, *fields) -> None:
"""Clear some fields from this :class:`Document` to their default values.
:param fields: field names to clear.
"""
for f in fields:
if hasattr(self, f):
setattr(self._data, f, None)
@property
def non_empty_fields(self) -> Tuple[str]:
"""Get all non-emtpy fields of this :class:`Document`.
Non-empty fields are the fields with not-`None` and not-default values.
:return: field names in a tuple.
"""
return self._data._non_empty_fields
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __hash__(self):
return hash(self._data)
def __repr__(self):
content = str(self.non_empty_fields)
content += f' at {getattr(self, "id", id(self))}'
return f'<{self.__class__.__name__} {content.strip()}>'
def __bytes__(self):
return self.to_bytes()
def __eq__(self, other):
if type(self) is type(other):
return self._data == other._data
return False
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from mmdet.registry import MODELS
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_backbone,
teacher_neck,
teacher_bbox_head,
teacher_ckpt,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KnowledgeDistillationSingleStageDetector,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self):
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, img):
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(img)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(img)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# the student use the label assignment from the teacher to learn
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, label_assignment_results,
img_metas, gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@DETECTORS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_backbone,
teacher_neck,
teacher_bbox_head,
teacher_ckpt,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KnowledgeDistillationSingleStageDetector,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = build_backbone(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = build_neck(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = build_head(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self):
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, img):
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(img)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(img)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# the student use the label assignment from the teacher to learn
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, label_assignment_results,
img_metas, gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
|
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='TEXT', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='integer', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for i in range(len(columns)):
columns[i] = (
columns[i][0],
self._map_type(columns[i][1]),
)
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='TEXT', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='integer', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for i in range(len(columns)):
columns[i] = (
columns[i][0],
self._map_type(columns[i][1]),
)
return columns
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from docarray import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth' # noqa
image_size = (1024, 1024)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='base',
out_indices=[0, 1, 2, 3],
# TODO: verify stochastic depth rate {0.1, 0.2, 0.3, 0.4}
drop_path_rate=0.4,
layer_scale_init_value=0., # disable layer scale when using GRN
gap_before_final_norm=False,
use_grn=True, # V2 uses GRN
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[128, 256, 512, 1024]),
test_cfg=dict(
rpn=dict(nms=dict(type='nms')), # TODO: does RPN use soft_nms?
rcnn=dict(nms=dict(type='soft_nms'))))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=4, # total_batch_size 32 = 8 GPUS x 4 images
num_workers=8,
dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise', # TODO: sweep layer-wise lr decay?
'num_layers': 12
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1))
|
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth' # noqa
image_size = (1024, 1024)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='base',
out_indices=[0, 1, 2, 3],
# TODO: verify stochastic depth rate {0.1, 0.2, 0.3, 0.4}
drop_path_rate=0.4,
layer_scale_init_value=0., # disable layer scale when using GRN
gap_before_final_norm=False,
use_grn=True, # V2 uses GRN
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[128, 256, 512, 1024]),
test_cfg=dict(
rpn=dict(nms=dict(type='nms')), # TODO: does RPN use soft_nms?
rcnn=dict(nms=dict(type='soft_nms'))))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=4, # total_batch_size 32 = 8 GPUS x 4 images
num_workers=8,
dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise', # TODO: sweep layer-wise lr decay?
'num_layers': 12
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.optimizers import legacy
from keras.api.optimizers import schedules
from keras.src.optimizers import deserialize
from keras.src.optimizers import get
from keras.src.optimizers import serialize
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lamb import Lamb
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.muon import Muon
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.optimizers import legacy
from keras.api.optimizers import schedules
from keras.src.optimizers import deserialize
from keras.src.optimizers import get
from keras.src.optimizers import serialize
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lamb import Lamb
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
|
"""Tool for the Google Trends"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
class GoogleTrendsQueryRun(BaseTool):
"""Tool that queries the Google trends API."""
name: str = "google_trends"
description: str = (
"A wrapper around Google Trends Search. "
"Useful for when you need to get information about"
"google search trends from Google Trends"
"Input should be a search query."
)
api_wrapper: GoogleTrendsAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
"""Tool for the Google Trends"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
class GoogleTrendsQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries the Google trends API."""
name: str = "google_trends"
description: str = (
"A wrapper around Google Trends Search. "
"Useful for when you need to get information about"
"google search trends from Google Trends"
"Input should be a search query."
)
api_wrapper: GoogleTrendsAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
# Copyright 2025 Custom Diffusion authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
factor = 1.5
num_images = int(factor * num_class_images)
client = ClipClient(
url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
)
os.makedirs(f"{class_data_dir}/images", exist_ok=True)
if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
return
while True:
class_images = client.query(text=class_prompt)
if len(class_images) >= factor * num_class_images or num_images > 1e4:
break
else:
num_images = int(factor * num_images)
client = ClipClient(
url="https://knn.laion.ai/knn-service",
indice_name="laion_400m",
num_images=num_images,
aesthetic_weight=0.1,
)
count = 0
total = 0
pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
with (
open(f"{class_data_dir}/caption.txt", "w") as f1,
open(f"{class_data_dir}/urls.txt", "w") as f2,
open(f"{class_data_dir}/images.txt", "w") as f3,
):
while total < num_class_images:
images = class_images[count]
count += 1
try:
img = requests.get(images["url"], timeout=30)
if img.status_code == 200:
_ = Image.open(BytesIO(img.content))
with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
f.write(img.content)
f1.write(images["caption"] + "\n")
f2.write(images["url"] + "\n")
f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
total += 1
pbar.update(1)
else:
continue
except Exception:
continue
return
def parse_args():
parser = argparse.ArgumentParser("", add_help=False)
parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
|
# Copyright 2024 Custom Diffusion authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
factor = 1.5
num_images = int(factor * num_class_images)
client = ClipClient(
url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
)
os.makedirs(f"{class_data_dir}/images", exist_ok=True)
if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
return
while True:
class_images = client.query(text=class_prompt)
if len(class_images) >= factor * num_class_images or num_images > 1e4:
break
else:
num_images = int(factor * num_images)
client = ClipClient(
url="https://knn.laion.ai/knn-service",
indice_name="laion_400m",
num_images=num_images,
aesthetic_weight=0.1,
)
count = 0
total = 0
pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
with (
open(f"{class_data_dir}/caption.txt", "w") as f1,
open(f"{class_data_dir}/urls.txt", "w") as f2,
open(f"{class_data_dir}/images.txt", "w") as f3,
):
while total < num_class_images:
images = class_images[count]
count += 1
try:
img = requests.get(images["url"], timeout=30)
if img.status_code == 200:
_ = Image.open(BytesIO(img.content))
with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
f.write(img.content)
f1.write(images["caption"] + "\n")
f2.write(images["url"] + "\n")
f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
total += 1
pbar.update(1)
else:
continue
except Exception:
continue
return
def parse_args():
parser = argparse.ArgumentParser("", add_help=False)
parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import ADE20KDataset, ADE20KPanopticDataset
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import COCOCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCOCODataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'COCOCaptionDataset', 'RefCOCODataset',
'BaseSegDataset', 'ADE20KDataset'
]
|
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import SentenceTransformerModelCardCallback, SentenceTransformerModelCardData
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
pass
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseEncoderModelCardCallback(SentenceTransformerModelCardCallback):
pass
@dataclass
class SparseEncoderModelCardData(SentenceTransformerModelCardData):
"""A dataclass storing data used in the model card.
Args:
language (`Optional[Union[str, List[str]]]`): The model language, either a string or a list,
e.g. "en" or ["en", "de", "nl"]
license (`Optional[str]`): The license of the model, e.g. "apache-2.0", "mit",
or "cc-by-nc-sa-4.0"
model_name (`Optional[str]`): The pretty name of the model, e.g. "SparseEncoder based on answerdotai/ModernBERT-base".
model_id (`Optional[str]`): The model ID when pushing the model to the Hub,
e.g. "tomaarsen/se-mpnet-base-ms-marco".
train_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the training datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}, {"name": "STSB"}]
eval_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the evaluation datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"id": "mteb/stsbenchmark-sts"}]
task_name (`str`): The human-readable task the model is trained on,
e.g. "semantic search and sparse retrieval".
tags (`Optional[List[str]]`): A list of tags for the model,
e.g. ["sentence-transformers", "sparse-encoder"].
.. tip::
Install `codecarbon <https://github.com/mlco2/codecarbon>`_ to automatically track carbon emission usage and
include it in your model cards.
Example::
>>> model = SparseEncoder(
... "microsoft/mpnet-base",
... model_card_data=SparseEncoderModelCardData(
... model_id="tomaarsen/se-mpnet-base-allnli",
... train_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... eval_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... license="apache-2.0",
... language="en",
... ),
... )
"""
# Potentially provided by the user
task_name: str = field(default=None)
tags: list[str] | None = field(
default_factory=lambda: [
"sentence-transformers",
"sparse-encoder",
]
)
# Automatically filled by `SparseEncoderModelCardCallback` and the Trainer directly
predict_example: list[list[str]] | None = field(default=None, init=False)
# Computed once, always unchanged
pipeline_tag: str = field(default=None, init=False)
template_path: Path = field(default=Path(__file__).parent / "model_card_template.md", init=False)
# Passed via `register_model` only
model: SparseEncoder | None = field(default=None, init=False, repr=False)
def register_model(self, model: SparseEncoder) -> None:
self.model = model
if self.task_name is None:
self.task_name = "semantic search and sparse retrieval"
if self.pipeline_tag is None:
self.pipeline_tag = "feature-extraction"
def tokenize(self, text: str | list[str]) -> dict[str, Any]:
return self.model.tokenizer(text)
def get_model_specific_metadata(self) -> dict[str, Any]:
return {
"model_max_length": self.model.get_max_seq_length(),
"output_dimensionality": self.model.get_sentence_embedding_dimension(),
}
|
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import (
SentenceTransformerModelCardCallback,
SentenceTransformerModelCardData,
)
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
pass
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseEncoderModelCardCallback(SentenceTransformerModelCardCallback):
pass
@dataclass
class SparseEncoderModelCardData(SentenceTransformerModelCardData):
"""A dataclass storing data used in the model card.
Args:
language (`Optional[Union[str, List[str]]]`): The model language, either a string or a list,
e.g. "en" or ["en", "de", "nl"]
license (`Optional[str]`): The license of the model, e.g. "apache-2.0", "mit",
or "cc-by-nc-sa-4.0"
model_name (`Optional[str]`): The pretty name of the model, e.g. "SparseEncoder based on answerdotai/ModernBERT-base".
model_id (`Optional[str]`): The model ID when pushing the model to the Hub,
e.g. "tomaarsen/se-mpnet-base-ms-marco".
train_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the training datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}, {"name": "STSB"}]
eval_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the evaluation datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"id": "mteb/stsbenchmark-sts"}]
task_name (`str`): The human-readable task the model is trained on,
e.g. "semantic search and sparse retrieval".
tags (`Optional[List[str]]`): A list of tags for the model,
e.g. ["sentence-transformers", "sparse-encoder"].
.. tip::
Install `codecarbon <https://github.com/mlco2/codecarbon>`_ to automatically track carbon emission usage and
include it in your model cards.
Example::
>>> model = SparseEncoder(
... "microsoft/mpnet-base",
... model_card_data=SparseEncoderModelCardData(
... model_id="tomaarsen/se-mpnet-base-allnli",
... train_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... eval_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... license="apache-2.0",
... language="en",
... ),
... )
"""
# Potentially provided by the user
task_name: str = field(default=None)
tags: list[str] | None = field(
default_factory=lambda: [
"sentence-transformers",
"sparse-encoder",
]
)
# Automatically filled by `SparseEncoderModelCardCallback` and the Trainer directly
predict_example: list[list[str]] | None = field(default=None, init=False)
# Computed once, always unchanged
pipeline_tag: str = field(default=None, init=False)
template_path: Path = field(default=Path(__file__).parent / "model_card_template.md", init=False)
# Passed via `register_model` only
model: SparseEncoder | None = field(default=None, init=False, repr=False)
def register_model(self, model: SparseEncoder) -> None:
self.model = model
if self.task_name is None:
self.task_name = "semantic search and sparse retrieval"
if self.pipeline_tag is None:
self.pipeline_tag = "feature-extraction"
def tokenize(self, text: str | list[str]) -> dict[str, Any]:
return self.model.tokenizer(text)
def get_model_specific_metadata(self) -> dict[str, Any]:
return {
"model_max_length": self.model.get_max_seq_length(),
"output_dimensionality": self.model.get_sentence_embedding_dimension(),
}
|
import pytest
from backend.data import db
from backend.executor import ExecutionScheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
is_template=False,
user_id=test_user.id,
)
scheduler = get_service_client(ExecutionScheduler)
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule_id = scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule_id
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[schedule_id] == "0 0 * * *"
scheduler.update_schedule(schedule_id, is_enabled=False, user_id=test_user.id)
schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
assert len(schedules) == 0
|
import pytest
from backend.data import db
from backend.executor import ExecutionScheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
is_template=False,
user_id=test_user.id,
)
scheduler = get_service_client(ExecutionScheduler)
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule_id = scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule_id
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[schedule_id] == "0 0 * * *"
scheduler.update_schedule(schedule_id, is_enabled=False, user_id=test_user.id)
schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
assert len(schedules) == 0
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
configs_without_checkpoint = []
for config_class in list(CONFIG_MAPPING.values()):
checkpoint_found = False
# source code of `config_class`
config_source = inspect.getsource(config_class)
checkpoints = _re_checkpoint.findall(config_source)
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
ckpt_name, ckpt_link = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
checkpoint_found = True
break
name = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(name)
if len(configs_without_checkpoint) > 0:
message = "\n".join(sorted(configs_without_checkpoint))
raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
configs_without_checkpoint = []
for config_class in list(CONFIG_MAPPING.values()):
checkpoint_found = False
# source code of `config_class`
config_source = inspect.getsource(config_class)
checkpoints = _re_checkpoint.findall(config_source)
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
ckpt_name, ckpt_link = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
checkpoint_found = True
break
name = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(name)
if len(configs_without_checkpoint) > 0:
message = "\n".join(sorted(configs_without_checkpoint))
raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
|
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQuAD metric."""
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
},
}
),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
)
def _compute(self, predictions, references):
pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
score = evaluate(dataset=dataset, predictions=pred_dict)
return score
|
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SQuAD metric. """
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
},
}
),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
)
def _compute(self, predictions, references):
pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
score = evaluate(dataset=dataset, predictions=pred_dict)
return score
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from typing import Any, Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="unreal_speech",
api_key=SecretStr("mock-unreal-speech-api-key"),
title="Mock Unreal Speech API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class UnrealTextToSpeechBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="The text to be converted to speech",
placeholder="Enter the text you want to convert to speech",
)
voice_id: str = SchemaField(
description="The voice ID to use for text-to-speech conversion",
placeholder="Scarlett",
default="Scarlett",
)
credentials: CredentialsMetaInput[
Literal["unreal_speech"], Literal["api_key"]
] = CredentialsField(
provider="unreal_speech",
supported_credential_types={"api_key"},
description="The Unreal Speech integration can be used with "
"any API key with sufficient permissions for the blocks it is used on.",
)
class Output(BlockSchema):
mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
error: str = SchemaField(description="Error message if the API call failed")
def __init__(self):
super().__init__(
id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
description="Converts text to speech using the Unreal Speech API",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=UnrealTextToSpeechBlock.Input,
output_schema=UnrealTextToSpeechBlock.Output,
test_input={
"text": "This is a test of the text to speech API.",
"voice_id": "Scarlett",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[("mp3_url", "https://example.com/test.mp3")],
test_mock={
"call_unreal_speech_api": lambda *args, **kwargs: {
"OutputUri": "https://example.com/test.mp3"
}
},
test_credentials=TEST_CREDENTIALS,
)
@staticmethod
def call_unreal_speech_api(
api_key: SecretStr, text: str, voice_id: str
) -> dict[str, Any]:
url = "https://api.v7.unrealspeech.com/speech"
headers = {
"Authorization": f"Bearer {api_key.get_secret_value()}",
"Content-Type": "application/json",
}
data = {
"Text": text,
"VoiceId": voice_id,
"Bitrate": "192k",
"Speed": "0",
"Pitch": "1",
"TimestampType": "sentence",
}
response = requests.post(url, headers=headers, json=data)
return response.json()
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_response = self.call_unreal_speech_api(
credentials.api_key,
input_data.text,
input_data.voice_id,
)
yield "mp3_url", api_response["OutputUri"]
|
from typing import Any, Literal
import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="unreal_speech",
api_key=SecretStr("mock-unreal-speech-api-key"),
title="Mock Unreal Speech API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class UnrealTextToSpeechBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="The text to be converted to speech",
placeholder="Enter the text you want to convert to speech",
)
voice_id: str = SchemaField(
description="The voice ID to use for text-to-speech conversion",
placeholder="Scarlett",
default="Scarlett",
)
credentials: CredentialsMetaInput[
Literal["unreal_speech"], Literal["api_key"]
] = CredentialsField(
provider="unreal_speech",
supported_credential_types={"api_key"},
description="The Unreal Speech integration can be used with "
"any API key with sufficient permissions for the blocks it is used on.",
)
class Output(BlockSchema):
mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
error: str = SchemaField(description="Error message if the API call failed")
def __init__(self):
super().__init__(
id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
description="Converts text to speech using the Unreal Speech API",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=UnrealTextToSpeechBlock.Input,
output_schema=UnrealTextToSpeechBlock.Output,
test_input={
"text": "This is a test of the text to speech API.",
"voice_id": "Scarlett",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[("mp3_url", "https://example.com/test.mp3")],
test_mock={
"call_unreal_speech_api": lambda *args, **kwargs: {
"OutputUri": "https://example.com/test.mp3"
}
},
test_credentials=TEST_CREDENTIALS,
)
@staticmethod
def call_unreal_speech_api(
api_key: SecretStr, text: str, voice_id: str
) -> dict[str, Any]:
url = "https://api.v7.unrealspeech.com/speech"
headers = {
"Authorization": f"Bearer {api_key.get_secret_value()}",
"Content-Type": "application/json",
}
data = {
"Text": text,
"VoiceId": voice_id,
"Bitrate": "192k",
"Speed": "0",
"Pitch": "1",
"TimestampType": "sentence",
}
response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
return response.json()
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_response = self.call_unreal_speech_api(
credentials.api_key,
input_data.text,
input_data.voice_id,
)
yield "mp3_url", api_response["OutputUri"]
|
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
class MyCustomAsyncHandler(AsyncCallbackHandler):
@override
async def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
@override
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
**kwargs: Any,
) -> None:
await asyncio.sleep(0)
@pytest.mark.benchmark
async def test_async_callbacks_in_sync(benchmark: BenchmarkFixture) -> None:
infinite_cycle = cycle([AIMessage(content=" ".join(["hello", "goodbye"] * 500))])
model = GenericFakeChatModel(messages=infinite_cycle)
@benchmark # type: ignore[misc]
def sync_callbacks() -> None:
for _ in range(5):
for _ in model.stream("meow", {"callbacks": [MyCustomAsyncHandler()]}):
pass
|
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
class MyCustomAsyncHandler(AsyncCallbackHandler):
@override
async def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
@override
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
**kwargs: Any,
) -> None:
await asyncio.sleep(0)
@pytest.mark.benchmark
async def test_async_callbacks_in_sync(benchmark: BenchmarkFixture) -> None:
infinite_cycle = cycle([AIMessage(content=" ".join(["hello", "goodbye"] * 500))])
model = GenericFakeChatModel(messages=infinite_cycle)
@benchmark
def sync_callbacks() -> None:
for _ in range(5):
for _ in model.stream("meow", {"callbacks": [MyCustomAsyncHandler()]}):
pass
|
import re
import torch
from torch.utils.hipify.hipify_python import PYTORCH_MAP, PYTORCH_TRIE
# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like:
# "...
# from ..codecache import CudaKernelParamCache
# ..."
# In such cases, we do not need to hipify_torch the original class/file name in codegen/codecache
def maybe_hipify_code_wrapper(source_codes: str, force_hipify: bool = False) -> str:
if torch.version.hip is None and not force_hipify:
return source_codes
def c2_repl(m: re.Match[str]) -> object:
return PYTORCH_MAP[m.group(0)]
# We need to redefine RE_PYTORCH_PREPROCESSOR here since in hipify_torch,
# it will apply positive lookbehind (?<=\W) to the pattern to avoid matching
# keyword at the beginning of code line. However, this can happen in codegen,
# which will cause the pattern to not match.
# Note that lookahead (?=\W) is still needed to keep hipification idomponent, for example
# we need to skip replacing "getStreamFromExternal" in "getStreamFromExternalMasqueradingAsCUDA"
RE_PYTORCH_PREPROCESSOR = re.compile(rf"({PYTORCH_TRIE.export_to_regex()})(?=\W)")
source_codes = RE_PYTORCH_PREPROCESSOR.sub(c2_repl, source_codes) # type: ignore[arg-type]
return source_codes
|
import re
import torch
from torch.utils.hipify.hipify_python import PYTORCH_MAP, PYTORCH_TRIE
# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like:
# "...
# from ..codecache import CudaKernelParamCache
# ..."
# In such cases, we do not need to hipify_torch the orignial class/file name in codegen/codecache
def maybe_hipify_code_wrapper(source_codes: str, force_hipify: bool = False) -> str:
if torch.version.hip is None and not force_hipify:
return source_codes
def c2_repl(m: re.Match[str]) -> object:
return PYTORCH_MAP[m.group(0)]
# We need to redefine RE_PYTORCH_PREPROCESSOR here since in hipify_torch,
# it will apply positive lookbehind (?<=\W) to the pattern to avoid matching
# keyword at the beginning of code line. However, this can happen in codegen,
# which will cause the pattern to not match.
# Note that lookahead (?=\W) is still needed to keep hipification idomponent, for example
# we need to skip replacing "getStreamFromExternal" in "getStreamFromExternalMasqueradingAsCUDA"
RE_PYTORCH_PREPROCESSOR = re.compile(rf"({PYTORCH_TRIE.export_to_regex()})(?=\W)")
source_codes = RE_PYTORCH_PREPROCESSOR.sub(c2_repl, source_codes) # type: ignore[arg-type]
return source_codes
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmengine
from mmengine import Config, DictAction
from mmengine.evaluator import Evaluator
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
register_all_modules(init_default_scope=True)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
dataset = DATASETS.build(cfg.test_dataloader.dataset)
predictions = mmengine.load(args.pkl_results)
evaluator = Evaluator(cfg.val_evaluator)
evaluator.dataset_meta = dataset.metainfo
eval_results = evaluator.offline_evaluate(predictions)
print(eval_results)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmengine.config import Config, DictAction
from mmengine.fileio import load
from mmdet.datasets import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='Evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
|
"""
Test the standard tests on the custom chat model in the docs
"""
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_tests.unit_tests import ChatModelUnitTests
from .custom_chat_model import ChatParrotLink
class TestChatParrotLinkUnit(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[ChatParrotLink]:
return ChatParrotLink
@property
def chat_model_params(self) -> dict:
return {"model": "bird-brain-001", "temperature": 0, "parrot_buffer_length": 50}
class TestChatParrotLinkIntegration(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatParrotLink]:
return ChatParrotLink
@property
def chat_model_params(self) -> dict:
return {"model": "bird-brain-001", "temperature": 0, "parrot_buffer_length": 50}
|
"""
Test the standard tests on the custom chat model in the docs
"""
from typing import Type
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_tests.unit_tests import ChatModelUnitTests
from .custom_chat_model import ChatParrotLink
class TestChatParrotLinkUnit(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[ChatParrotLink]:
return ChatParrotLink
@property
def chat_model_params(self) -> dict:
return {"model": "bird-brain-001", "temperature": 0, "parrot_buffer_length": 50}
class TestChatParrotLinkIntegration(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatParrotLink]:
return ChatParrotLink
@property
def chat_model_params(self) -> dict:
return {"model": "bird-brain-001", "temperature": 0, "parrot_buffer_length": 50}
|
import pytest
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
from langchain_core.stores import InMemoryStore
# Check against standard tests
class TestSyncInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def kv_store(self) -> InMemoryStore:
return InMemoryStore()
@pytest.fixture
def three_values(self) -> tuple[str, str, str]:
return "value1", "value2", "value3"
class TestAsyncInMemoryStore(BaseStoreAsyncTests):
@pytest.fixture
async def kv_store(self) -> InMemoryStore:
return InMemoryStore()
@pytest.fixture
def three_values(self) -> tuple[str, str, str]: # type: ignore[override]
return "value1", "value2", "value3"
def test_mget() -> None:
store = InMemoryStore()
store.mset([("key1", "value1"), ("key2", "value2")])
values = store.mget(["key1", "key2"])
assert values == ["value1", "value2"]
# Test non-existent key
non_existent_value = store.mget(["key3"])
assert non_existent_value == [None]
async def test_amget() -> None:
store = InMemoryStore()
await store.amset([("key1", "value1"), ("key2", "value2")])
values = await store.amget(["key1", "key2"])
assert values == ["value1", "value2"]
# Test non-existent key
non_existent_value = await store.amget(["key3"])
assert non_existent_value == [None]
def test_mset() -> None:
store = InMemoryStore()
store.mset([("key1", "value1"), ("key2", "value2")])
values = store.mget(["key1", "key2"])
assert values == ["value1", "value2"]
async def test_amset() -> None:
store = InMemoryStore()
await store.amset([("key1", "value1"), ("key2", "value2")])
values = await store.amget(["key1", "key2"])
assert values == ["value1", "value2"]
def test_mdelete() -> None:
store = InMemoryStore()
store.mset([("key1", "value1"), ("key2", "value2")])
store.mdelete(["key1"])
values = store.mget(["key1", "key2"])
assert values == [None, "value2"]
# Test deleting non-existent key
store.mdelete(["key3"]) # No error should be raised
async def test_amdelete() -> None:
store = InMemoryStore()
await store.amset([("key1", "value1"), ("key2", "value2")])
await store.amdelete(["key1"])
values = await store.amget(["key1", "key2"])
assert values == [None, "value2"]
# Test deleting non-existent key
await store.amdelete(["key3"]) # No error should be raised
def test_yield_keys() -> None:
store = InMemoryStore()
store.mset([("key1", "value1"), ("key2", "value2"), ("key3", "value3")])
keys = list(store.yield_keys())
assert set(keys) == {"key1", "key2", "key3"}
keys_with_prefix = list(store.yield_keys(prefix="key"))
assert set(keys_with_prefix) == {"key1", "key2", "key3"}
keys_with_invalid_prefix = list(store.yield_keys(prefix="x"))
assert keys_with_invalid_prefix == []
async def test_ayield_keys() -> None:
store = InMemoryStore()
await store.amset([("key1", "value1"), ("key2", "value2"), ("key3", "value3")])
keys = [key async for key in store.ayield_keys()]
assert set(keys) == {"key1", "key2", "key3"}
keys_with_prefix = [key async for key in store.ayield_keys(prefix="key")]
assert set(keys_with_prefix) == {"key1", "key2", "key3"}
keys_with_invalid_prefix = [key async for key in store.ayield_keys(prefix="x")]
assert keys_with_invalid_prefix == []
|
import pytest
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
from langchain_core.stores import InMemoryStore
# Check against standard tests
class TestSyncInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def kv_store(self) -> InMemoryStore:
return InMemoryStore()
@pytest.fixture
def three_values(self) -> tuple[str, str, str]: # type: ignore
return "value1", "value2", "value3"
class TestAsyncInMemoryStore(BaseStoreAsyncTests):
@pytest.fixture
async def kv_store(self) -> InMemoryStore:
return InMemoryStore()
@pytest.fixture
def three_values(self) -> tuple[str, str, str]: # type: ignore
return "value1", "value2", "value3"
def test_mget() -> None:
store = InMemoryStore()
store.mset([("key1", "value1"), ("key2", "value2")])
values = store.mget(["key1", "key2"])
assert values == ["value1", "value2"]
# Test non-existent key
non_existent_value = store.mget(["key3"])
assert non_existent_value == [None]
async def test_amget() -> None:
store = InMemoryStore()
await store.amset([("key1", "value1"), ("key2", "value2")])
values = await store.amget(["key1", "key2"])
assert values == ["value1", "value2"]
# Test non-existent key
non_existent_value = await store.amget(["key3"])
assert non_existent_value == [None]
def test_mset() -> None:
store = InMemoryStore()
store.mset([("key1", "value1"), ("key2", "value2")])
values = store.mget(["key1", "key2"])
assert values == ["value1", "value2"]
async def test_amset() -> None:
store = InMemoryStore()
await store.amset([("key1", "value1"), ("key2", "value2")])
values = await store.amget(["key1", "key2"])
assert values == ["value1", "value2"]
def test_mdelete() -> None:
store = InMemoryStore()
store.mset([("key1", "value1"), ("key2", "value2")])
store.mdelete(["key1"])
values = store.mget(["key1", "key2"])
assert values == [None, "value2"]
# Test deleting non-existent key
store.mdelete(["key3"]) # No error should be raised
async def test_amdelete() -> None:
store = InMemoryStore()
await store.amset([("key1", "value1"), ("key2", "value2")])
await store.amdelete(["key1"])
values = await store.amget(["key1", "key2"])
assert values == [None, "value2"]
# Test deleting non-existent key
await store.amdelete(["key3"]) # No error should be raised
def test_yield_keys() -> None:
store = InMemoryStore()
store.mset([("key1", "value1"), ("key2", "value2"), ("key3", "value3")])
keys = list(store.yield_keys())
assert set(keys) == {"key1", "key2", "key3"}
keys_with_prefix = list(store.yield_keys(prefix="key"))
assert set(keys_with_prefix) == {"key1", "key2", "key3"}
keys_with_invalid_prefix = list(store.yield_keys(prefix="x"))
assert keys_with_invalid_prefix == []
async def test_ayield_keys() -> None:
store = InMemoryStore()
await store.amset([("key1", "value1"), ("key2", "value2"), ("key3", "value3")])
keys = [key async for key in store.ayield_keys()]
assert set(keys) == {"key1", "key2", "key3"}
keys_with_prefix = [key async for key in store.ayield_keys(prefix="key")]
assert set(keys_with_prefix) == {"key1", "key2", "key3"}
keys_with_invalid_prefix = [key async for key in store.ayield_keys(prefix="x")]
assert keys_with_invalid_prefix == []
|
# coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError("You need to install matplotlib and restart your session for plot_example.py.")
print("Loading data...")
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / "regression"
df_train = pd.read_csv(str(regression_example_dir / "regression.train"), header=None, sep="\t")
df_test = pd.read_csv(str(regression_example_dir / "regression.test"), header=None, sep="\t")
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {"num_leaves": 5, "metric": ("l1", "l2"), "verbose": 0}
evals_result = {} # to record eval results for plotting
print("Starting training...")
# train
gbm = lgb.train(
params,
lgb_train,
num_boost_round=100,
valid_sets=[lgb_train, lgb_test],
feature_name=[f"f{i + 1}" for i in range(X_train.shape[-1])],
categorical_feature=[21],
callbacks=[lgb.log_evaluation(10), lgb.record_evaluation(evals_result)],
)
print("Plotting metrics recorded during training...")
ax = lgb.plot_metric(evals_result, metric="l1")
plt.show()
print("Plotting feature importances...")
ax = lgb.plot_importance(gbm, max_num_features=10)
plt.show()
print("Plotting split value histogram...")
ax = lgb.plot_split_value_histogram(gbm, feature="f26", bins="auto")
plt.show()
print("Plotting 54th tree...") # one tree use categorical feature to split
ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=["split_gain"])
plt.show()
print("Plotting 54th tree with graphviz...")
graph = lgb.create_tree_digraph(gbm, tree_index=53, name="Tree54")
graph.render(view=True)
|
# coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You need to install matplotlib and restart your session for plot_example.py.')
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'num_leaves': 5,
'metric': ('l1', 'l2'),
'verbose': 0
}
evals_result = {} # to record eval results for plotting
print('Starting training...')
# train
gbm = lgb.train(
params,
lgb_train,
num_boost_round=100,
valid_sets=[lgb_train, lgb_test],
feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
categorical_feature=[21],
callbacks=[
lgb.log_evaluation(10),
lgb.record_evaluation(evals_result)
]
)
print('Plotting metrics recorded during training...')
ax = lgb.plot_metric(evals_result, metric='l1')
plt.show()
print('Plotting feature importances...')
ax = lgb.plot_importance(gbm, max_num_features=10)
plt.show()
print('Plotting split value histogram...')
ax = lgb.plot_split_value_histogram(gbm, feature='f26', bins='auto')
plt.show()
print('Plotting 54th tree...') # one tree use categorical feature to split
ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=['split_gain'])
plt.show()
print('Plotting 54th tree with graphviz...')
graph = lgb.create_tree_digraph(gbm, tree_index=53, name='Tree54')
graph.render(view=True)
|
from typing import Union, Sequence, List, Tuple
from ai21.models import ChatMessage as J2ChatMessage, RoleType
from ai21.models.chat import (
ChatMessage as AI21ChatMessage,
AssistantMessage,
ToolMessage as AI21ToolMessage,
UserMessage,
SystemMessage,
)
from llama_index.core.base.llms.types import ChatMessage, MessageRole
JAMBA_MODELS = {
"jamba-instruct": 256_000,
"jamba-1.5-mini": 256_000,
"jamba-1.5-large": 256_000,
"jamba-1.5": 256_000,
}
_SYSTEM_ERR_MESSAGE = "System message must be at beginning of message list."
def ai21_model_to_context_size(model: str) -> Union[int, None]:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
model: The modelname we want to know the context size for.
Returns:
The maximum context size
"""
token_limit = JAMBA_MODELS.get(model)
if token_limit is None:
raise ValueError(f"Model name {model} not found in {JAMBA_MODELS.keys()}")
return token_limit
def message_to_ai21_j2_message(
messages: Sequence[ChatMessage],
) -> Tuple[str, List[J2ChatMessage]]:
system_message = ""
converted_messages = [] # type: ignore
for i, message in enumerate(messages):
if message.role == MessageRole.SYSTEM:
if i != 0:
raise ValueError(_SYSTEM_ERR_MESSAGE)
else:
system_message = message.content
else:
converted_message = J2ChatMessage(
role=RoleType[message.role.name], text=message.content
)
converted_messages.append(converted_message)
return system_message, converted_messages
def message_to_ai21_message(message: ChatMessage) -> AI21ChatMessage:
if message.role == MessageRole.TOOL:
return AI21ToolMessage(
content=message.content,
tool_call_id=message.additional_kwargs["tool_call_id"],
)
if message.role == MessageRole.ASSISTANT:
return AssistantMessage(content=message.content)
if message.role == MessageRole.USER:
return UserMessage(content=message.content)
if message.role == MessageRole.SYSTEM:
return SystemMessage(content=message.content)
return AI21ChatMessage(role=message.role, content=message.content)
def is_function_calling_model(model: str) -> bool:
return "1.5" in model
def from_ai21_message_to_chat_message(ai21_message: AssistantMessage) -> ChatMessage:
return ChatMessage(
role=ai21_message.role,
content=ai21_message.content,
additional_kwargs={}
if ai21_message.tool_calls is None
else {"tool_calls": ai21_message.tool_calls},
)
|
from typing import Union, Sequence, List, Tuple
from ai21.models import ChatMessage as J2ChatMessage, RoleType
from ai21.models.chat import (
ChatMessage as AI21ChatMessage,
AssistantMessage,
ToolMessage as AI21ToolMessage,
UserMessage,
SystemMessage,
)
from llama_index.core.base.llms.types import ChatMessage, MessageRole
JAMBA_MODELS = {
"jamba-instruct": 256_000,
"jamba-1.5-mini": 256_000,
"jamba-1.5-large": 256_000,
"jamba-1.5": 256_000,
}
_SYSTEM_ERR_MESSAGE = "System message must be at beginning of message list."
def ai21_model_to_context_size(model: str) -> Union[int, None]:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
model: The modelname we want to know the context size for.
Returns:
The maximum context size
"""
token_limit = JAMBA_MODELS.get(model, None)
if token_limit is None:
raise ValueError(f"Model name {model} not found in {JAMBA_MODELS.keys()}")
return token_limit
def message_to_ai21_j2_message(
messages: Sequence[ChatMessage],
) -> Tuple[str, List[J2ChatMessage]]:
system_message = ""
converted_messages = [] # type: ignore
for i, message in enumerate(messages):
if message.role == MessageRole.SYSTEM:
if i != 0:
raise ValueError(_SYSTEM_ERR_MESSAGE)
else:
system_message = message.content
else:
converted_message = J2ChatMessage(
role=RoleType[message.role.name], text=message.content
)
converted_messages.append(converted_message)
return system_message, converted_messages
def message_to_ai21_message(message: ChatMessage) -> AI21ChatMessage:
if message.role == MessageRole.TOOL:
return AI21ToolMessage(
content=message.content,
tool_call_id=message.additional_kwargs["tool_call_id"],
)
if message.role == MessageRole.ASSISTANT:
return AssistantMessage(content=message.content)
if message.role == MessageRole.USER:
return UserMessage(content=message.content)
if message.role == MessageRole.SYSTEM:
return SystemMessage(content=message.content)
return AI21ChatMessage(role=message.role, content=message.content)
def is_function_calling_model(model: str) -> bool:
return "1.5" in model
def from_ai21_message_to_chat_message(ai21_message: AssistantMessage) -> ChatMessage:
return ChatMessage(
role=ai21_message.role,
content=ai21_message.content,
additional_kwargs={}
if ai21_message.tool_calls is None
else {"tool_calls": ai21_message.tool_calls},
)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
epsilla_config = {
"protocol": 'http',
"host": 'localhost',
"port": 8888,
"is_self_hosted": True,
"db_path": "/epsilla",
"db_name": "tony_doc_array_test",
}
def index_len(index, max_len=20):
return len(index.filter("", limit=max_len))
|
epsilla_config = {
"protocol": 'http',
"host": 'localhost',
"port": 8888,
"is_self_hosted": True,
"db_path": "/epsilla",
"db_name": "tony_doc_array_test",
}
def index_len(index, max_len=20):
return len(index.filter("", limit=max_len))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
|
"""
This module provides backward-compatible exports of core language model classes.
These classes are re-exported for compatibility with older versions of LangChain
and allow users to import language model interfaces from a stable path.
Exports:
- LLM: Abstract base class for all LLMs
- BaseLLM: Deprecated or foundational class for legacy LLMs
- BaseLanguageModel: Base class for core language model implementations
"""
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = [
"LLM",
"BaseLLM",
"BaseLanguageModel",
]
|
# Backwards compatibility.
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import (
LLM,
BaseLLM,
)
__all__ = [
"LLM",
"BaseLLM",
"BaseLanguageModel",
]
|
"""LLM Prompt Program."""
from abc import abstractmethod
from typing import Any, Generic, Optional, Type, TypeVar
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.types import BasePydanticProgram, Model
LM = TypeVar("LM")
class BaseLLMFunctionProgram(BasePydanticProgram[BaseModel], Generic[LM]):
"""
Base LLM Prompt Program.
This is a base class for LLM endpoints that can return
a structured output given the prompt.
NOTE: this only works for structured endpoints atm
(does not work for text completion endpoints.)
"""
@classmethod
@abstractmethod
def from_defaults(
cls,
output_cls: Type[Model],
prompt_template_str: Optional[str] = None,
prompt: Optional[PromptTemplate] = None,
llm: Optional[LM] = None,
**kwargs: Any,
) -> "BaseLLMFunctionProgram":
"""Initialize program from defaults."""
|
"""LLM Prompt Program."""
from abc import abstractmethod
from typing import Any, Generic, Optional, Type, TypeVar
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.types import BasePydanticProgram, Model
LM = TypeVar("LM")
class BaseLLMFunctionProgram(BasePydanticProgram[BaseModel], Generic[LM]):
"""Base LLM Prompt Program.
This is a base class for LLM endpoints that can return
a structured output given the prompt.
NOTE: this only works for structured endpoints atm
(does not work for text completion endpoints.)
"""
@classmethod
@abstractmethod
def from_defaults(
cls,
output_cls: Type[Model],
prompt_template_str: Optional[str] = None,
prompt: Optional[PromptTemplate] = None,
llm: Optional[LM] = None,
**kwargs: Any,
) -> "BaseLLMFunctionProgram":
"""Initialize program from defaults."""
|
from .document import DocumentArray
from .storage.sqlite import StorageMixins, SqliteConfig
__all__ = ['SqliteConfig', 'DocumentArraySqlite']
class DocumentArraySqlite(StorageMixins, DocumentArray):
"""
DocumentArray that stores Documents in a `SQLite database <https://www.sqlite.org/index.html>`_.
This stores Documents on disk instead of keeping them in memory, and offers the simplest way of persisting data with DocArray.
With this implementation, :meth:`match` and :meth:`find` perform exact (exhaustive) vector search.
Example usage:
.. code-block:: python
from docarray import DocumentArray
# with default config
da = DocumentArray(storage='sqlite')
# with customized config
da1 = DocumentArray(storage='sqlite', config={'connection': 'example.db'})
# connect to a previously created database
da = DocumentArray(
storage='sqlite', config={'connection': 'example.db', 'table_name': 'mine'}
)
.. seealso::
For further details, see our :ref:`user guide <sqlite>`.
"""
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
|
from .document import DocumentArray
from .storage.sqlite import StorageMixins, SqliteConfig
__all__ = ['SqliteConfig', 'DocumentArraySqlite']
class DocumentArraySqlite(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
|
"""Argparser module for Pod runtimes"""
import argparse
from jina import helper
from jina.enums import PodRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_pod_parser(parser):
"""Mixing in arguments required by :class:`Pod` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Pod')
gp.add_argument(
'--runtime-cls',
type=str,
default='WorkerRuntime',
help='The runtime class to run inside the Pod',
)
gp.add_argument(
'--timeout-ready',
type=int,
default=600000,
help='The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting '
'forever',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
# hidden CLI used for internal only
gp.add_argument(
'--shard-id',
type=int,
default=0,
help='defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor`'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--pod-role',
type=PodRoleType.from_string,
choices=list(PodRoleType),
default=PodRoleType.WORKER,
help='The role of this Pod in a Deployment'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--noblock-on-start',
action='store_true',
default=False,
help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on '
'`wait_start_success` at outer function for the postpone check.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--port',
type=int,
default=helper.random_port(),
help='The port for input data to bind to, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--monitoring',
action='store_true',
default=False,
help='If set, spawn an http server with a prometheus endpoint to expose metrics',
)
gp.add_argument(
'--port-monitoring',
type=str,
default=str(helper.random_port()),
dest='port_monitoring',
help=f'The port on which the prometheus server is exposed, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--retries',
type=int,
default=-1,
dest='retries',
help=f'Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)',
)
gp.add_argument(
'--floating',
action='store_true',
default=False,
help='If set, the current Pod/Deployment can not be further chained, '
'and the next `.add()` will chain after the last Pod/Deployment not this current one.',
)
|
"""Argparser module for Pod runtimes"""
import argparse
from jina import helper
from jina.enums import PodRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_pod_parser(parser, port_monitoring=True):
"""Mixing in arguments required by :class:`Pod` into the given parser.
:param parser: the parser instance to which we add arguments
:param port_monitoring: if to include the port parsing
"""
gp = add_arg_group(parser, title='Pod')
gp.add_argument(
'--runtime-cls',
type=str,
default='WorkerRuntime',
help='The runtime class to run inside the Pod',
)
gp.add_argument(
'--timeout-ready',
type=int,
default=600000,
help='The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting '
'forever',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
# hidden CLI used for internal only
gp.add_argument(
'--shard-id',
type=int,
default=0,
help='defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor`'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--pod-role',
type=PodRoleType.from_string,
choices=list(PodRoleType),
default=PodRoleType.WORKER,
help='The role of this Pod in a Deployment'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--noblock-on-start',
action='store_true',
default=False,
help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on '
'`wait_start_success` at outer function for the postpone check.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--port',
type=int,
default=helper.random_port(),
help='The port for input data to bind to, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--monitoring',
action='store_true',
default=False,
help='If set, spawn an http server with a prometheus endpoint to expose metrics',
)
if port_monitoring:
gp.add_argument(
'--port-monitoring',
type=int,
default=helper.random_port(),
dest='port_monitoring',
help=f'The port on which the prometheus server is exposed, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--retries',
type=int,
default=-1,
dest='retries',
help=f'Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)',
)
gp.add_argument(
'--floating',
action='store_true',
default=False,
help='If set, the current Pod/Deployment can not be further chained, '
'and the next `.add()` will chain after the last Pod/Deployment not this current one.',
)
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(evaluation.SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], truncate_dim=k_dim))
dev_evaluator = evaluation.SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments, losses
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], truncate_dim=k_dim))
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
import importlib
from types import ModuleType
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="mod",
params=[
"tutorial001",
pytest.param("tutorial001_py310", marks=needs_py310),
"tutorial001_an",
pytest.param("tutorial001_an_py39", marks=needs_py39),
pytest.param("tutorial001_an_py310", marks=needs_py310),
],
)
def get_mod(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.cookie_params.{request.param}")
return mod
@pytest.mark.parametrize(
"path,cookies,expected_status,expected_response",
[
("/items", None, 200, {"ads_id": None}),
("/items", {"ads_id": "ads_track"}, 200, {"ads_id": "ads_track"}),
(
"/items",
{"ads_id": "ads_track", "session": "cookiesession"},
200,
{"ads_id": "ads_track"},
),
("/items", {"session": "cookiesession"}, 200, {"ads_id": None}),
],
)
def test(path, cookies, expected_status, expected_response, mod: ModuleType):
client = TestClient(mod.app, cookies=cookies)
response = client.get(path)
assert response.status_code == expected_status
assert response.json() == expected_response
def test_openapi_schema(mod: ModuleType):
client = TestClient(mod.app)
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": IsDict(
{
"anyOf": [{"type": "string"}, {"type": "null"}],
"title": "Ads Id",
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Ads Id", "type": "string"}
),
"name": "ads_id",
"in": "cookie",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
|
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from docs_src.cookie_params.tutorial001 import app
@pytest.mark.parametrize(
"path,cookies,expected_status,expected_response",
[
("/items", None, 200, {"ads_id": None}),
("/items", {"ads_id": "ads_track"}, 200, {"ads_id": "ads_track"}),
(
"/items",
{"ads_id": "ads_track", "session": "cookiesession"},
200,
{"ads_id": "ads_track"},
),
("/items", {"session": "cookiesession"}, 200, {"ads_id": None}),
],
)
def test(path, cookies, expected_status, expected_response):
client = TestClient(app, cookies=cookies)
response = client.get(path)
assert response.status_code == expected_status
assert response.json() == expected_response
def test_openapi_schema():
client = TestClient(app)
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": IsDict(
{
"anyOf": [{"type": "string"}, {"type": "null"}],
"title": "Ads Id",
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Ads Id", "type": "string"}
),
"name": "ads_id",
"in": "cookie",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
|
import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.weaviate import DocumentArrayWeaviate, WeaviateConfig
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
def test_construct_docarray(da_cls, config, start_storage):
if config:
da = da_cls(config=config)
assert len(da) == 0
da = da_cls(Document(), config=config)
assert len(da) == 1
da = da_cls([Document(), Document()], config=config)
assert len(da) == 2
da = da_cls((Document(), Document()), config=config)
assert len(da) == 2
da = da_cls((Document() for _ in range(10)), config=config)
assert len(da) == 10
else:
da = da_cls()
assert len(da) == 0
da = da_cls(Document())
assert len(da) == 1
da = da_cls([Document(), Document()])
assert len(da) == 2
da = da_cls((Document(), Document()))
assert len(da) == 2
da = da_cls((Document() for _ in range(10)))
assert len(da) == 10
if da_cls is DocumentArrayInMemory:
da1 = da_cls(da)
assert len(da1) == 10
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_singleton(da_cls, config, is_copy, start_storage):
d = Document()
if config:
da = da_cls(d, copy=is_copy, config=config)
else:
da = da_cls(d, copy=is_copy)
d.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_da(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
if config:
da = da_cls([d1, d2], copy=is_copy, config=config)
else:
da = da_cls([d1, d2], copy=is_copy)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0] != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=1)),
(DocumentArrayQdrant, QdrantConfig(n_dim=1)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_list(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
da = da_cls([d1, d2], copy=is_copy, config=config)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0] != 'hello'
|
import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.weaviate import DocumentArrayWeaviate, WeaviateConfig
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_construct_docarray(da_cls, config, start_storage):
if config:
da = da_cls(config=config)
assert len(da) == 0
da = da_cls(Document(), config=config)
assert len(da) == 1
da = da_cls([Document(), Document()], config=config)
assert len(da) == 2
da = da_cls((Document(), Document()), config=config)
assert len(da) == 2
da = da_cls((Document() for _ in range(10)), config=config)
assert len(da) == 10
else:
da = da_cls()
assert len(da) == 0
da = da_cls(Document())
assert len(da) == 1
da = da_cls([Document(), Document()])
assert len(da) == 2
da = da_cls((Document(), Document()))
assert len(da) == 2
da = da_cls((Document() for _ in range(10)))
assert len(da) == 10
if da_cls is DocumentArrayInMemory:
da1 = da_cls(da)
assert len(da1) == 10
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_singleton(da_cls, config, is_copy, start_storage):
d = Document()
if config:
da = da_cls(d, copy=is_copy, config=config)
else:
da = da_cls(d, copy=is_copy)
d.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_da(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
if config:
da = da_cls([d1, d2], copy=is_copy, config=config)
else:
da = da_cls([d1, d2], copy=is_copy)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0] != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=1)),
(DocumentArrayQdrant, QdrantConfig(n_dim=1)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_list(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
da = da_cls([d1, d2], copy=is_copy, config=config)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0] != 'hello'
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given queries, a corpus and a mapping with relevant documents, the SparseMSEEvaluator computes different MSE metrics.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity: Active Dimensions: 55.6, Sparsity Ratio: 0.9982
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given queries, a corpus and a mapping with relevant documents, the SparseMSEEvaluator computes different MSE metrics.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity Stats: Row Non-Zero Mean: 55.60933303833008, Row Sparsity Mean: 0.9981780648231506
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D, BboxOverlaps2D_GLIP
from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
DiceCost, FocalLossCost, IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .multi_instance_assigner import MultiInstanceAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .topk_hungarian_assigner import TopkHungarianAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'TopkHungarianAssigner', 'BBoxL1Cost',
'ClassificationCost', 'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost',
'IoUCost', 'BboxOverlaps2D', 'DynamicSoftLabelAssigner',
'MultiInstanceAssigner', 'BboxOverlaps2D_GLIP'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D
from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
DiceCost, FocalLossCost, IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .multi_instance_assigner import MultiInstanceAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .topk_hungarian_assigner import TopkHungarianAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'TopkHungarianAssigner', 'BBoxL1Cost',
'ClassificationCost', 'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost',
'IoUCost', 'BboxOverlaps2D', 'DynamicSoftLabelAssigner',
'MultiInstanceAssigner'
]
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List, Tuple
from jina import DocumentArray, Executor, requests
class MatchMerger(Executor):
"""
The MatchMerger merges the results of shards by appending all matches..
"""
def __init__(self, default_traversal_paths: Tuple[str, ...] = ('r',), **kwargs):
"""
:param default_traversal_paths: traverse path on docs, e.g. ['r'], ['c']
"""
super().__init__(**kwargs)
self.default_traversal_paths = default_traversal_paths
@requests
def merge(self, docs_matrix: List[DocumentArray], parameters: dict, **kwargs):
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
results = {}
for docs in docs_matrix:
self._merge_shard(results, docs, traversal_paths)
return DocumentArray(list(results.values()))
def _merge_shard(self, results, docs, traversal_paths):
for doc in docs.traverse_flat(traversal_paths):
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List, Tuple
from jina import DocumentArray, Executor, requests
class MatchMerger(Executor):
"""
The MatchMerger merges the results of shards by appending all matches..
:param default_traversal_paths: traverse path on docs, e.g. ['r'], ['c']
:param args: additional arguments
:param kwargs: additional key value arguments
"""
def __init__(self, default_traversal_paths: Tuple[str, ...] = ('r',), **kwargs):
super().__init__(**kwargs)
self.default_traversal_paths = default_traversal_paths
@requests
def merge(self, docs_matrix: List[DocumentArray], parameters: dict, **kwargs):
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
results = {}
for docs in docs_matrix:
self._merge_shard(results, docs, traversal_paths)
return DocumentArray(list(results.values()))
def _merge_shard(self, results, docs, traversal_paths):
for doc in docs.traverse_flat(traversal_paths):
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class WeightedLayerPooling(Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
config_keys: list[str] = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super().__init__()
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :] # Start from 4th layers output
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super().__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :] # Start from 4th layers output
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.registry import TASK_UTILS
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide
image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).
cx, cy in [0., 1.], denotes relative center position w.r.t the center of
bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6):
super(BaseBBoxCoder, self).__init__()
self.eps = eps
@mmcv.jit(coderize=True)
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., anchors.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
@mmcv.jit(coderize=True)
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor): Basic boxes, e.g. anchors.
pred_bboxes (torch.Tensor): Encoded boxes with shape
stride (torch.Tensor | int): Strides of bboxes.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
return decoded_bboxes
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide
image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).
cx, cy in [0., 1.], denotes relative center position w.r.t the center of
bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6):
super(BaseBBoxCoder, self).__init__()
self.eps = eps
@mmcv.jit(coderize=True)
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., anchors.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
@mmcv.jit(coderize=True)
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor): Basic boxes, e.g. anchors.
pred_bboxes (torch.Tensor): Encoded boxes with shape
stride (torch.Tensor | int): Strides of bboxes.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
return decoded_bboxes
|
import functorch._C
import torch
from functorch._C import dim as _C
from .tree_map import tree_flatten, tree_map
from .wrap_type import wrap_type
_C._patch_tensor_class()
dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists
class DimensionMismatchError(Exception):
pass
class DimensionBindError(Exception):
pass
from . import op_properties
# use dict to avoid writing C++ bindings for set
pointwise = dict.fromkeys(op_properties.pointwise, True)
use_c = True
if not use_c:
from . import reference
class _Tensor:
# fast path around slow wrapping/unwrapping logic for simply queries used
# by the implementation...
@property
def dims(self):
return tuple(d for d in self._levels if isinstance(d, Dim))
def dim(self):
return self.ndim
if use_c:
__torch_function__ = classmethod(_C.__torch_function__)
expand = _C._instancemethod(_C.expand)
else:
__torch_function__ = reference.__torch_function__
expand = reference.expand
index = _C._instancemethod(_C.index)
def __repr__(self):
tensor, levels, ndim = self._tensor, self._levels, self.ndim
return f"{tensor}\nwith dims={tuple(l + ndim if isinstance(l, int) else l for l in levels)} sizes={tuple(tensor.size())}"
TensorLike = (_Tensor, torch.Tensor)
class Dim(_C.Dim, _Tensor):
# note that _C.Dim comes before tensor because we want the Dim API for things like size to take precedence.
# Tensor defines format, but we want to print Dims with special formatting
__format__ = object.__format__
class Tensor(_Tensor, _C.Tensor):
if not use_c:
from_batched = staticmethod(_C.Tensor_from_batched)
from_positional = staticmethod(_C.Tensor_from_positional)
sum = _C._instancemethod(_C.Tensor_sum)
def cat(tensors, dim, new_dim):
n = dims()
return stack(tensors, n, dim).index([n, dim], new_dim)
if use_c:
_wrap = _C._wrap
def _def(name, *args, **kwargs):
orig = getattr(torch.Tensor, name)
setattr(_Tensor, name, _C._instancemethod(_wrap(orig, *args, **kwargs)))
t__getitem__ = _C._instancemethod(_C.__getitem__)
stack = _C.stack
split = _C._instancemethod(_C.split)
else:
_wrap, _def = reference._wrap, reference._def
t__getitem__ = reference.t__getitem__
stack = reference.stack
split = reference.split
# note: there is no python reference
t__setitem__ = _C._instancemethod(_C.__setitem__)
# this is patched in the C API because otherwise torch.Tensor will
# no longer be considered a sequence and things will break
# torch.Tensor.__getitem__ = t__getitem__
_Tensor.__getitem__ = t__getitem__
# torch.Tensor.__setitem__ = t__setitem__
_Tensor.__setitem__ = t__setitem__
torch.Tensor.split = split
_Tensor.split = split
torch.Tensor.expand = _C._instancemethod(_C.expand)
torch.Tensor.index = _C._instancemethod(_C.index)
wrap_type(use_c, _Tensor, torch.Tensor, _Tensor.__torch_function__)
del _Tensor.ndim
if use_c:
_Tensor.order = _C._instancemethod(_C.order)
else:
_Tensor.order = reference.positional
_def("mean")
_def("sum")
_def("all")
_def("amax")
_def("amin")
_def("aminmax")
_def("any")
_def("count_nonzero")
_def("logsumexp")
_def("nanmean")
_def("nansum")
_def("prod")
_def("std", keepdim_offset=2)
_def("var", keepdim_offset=2)
_def("max", single_dim=True)
_def("min", single_dim=True)
_def("argmax", single_dim=True)
_def("argmin", single_dim=True)
_def("kthvalue", single_dim=True)
_def("median", single_dim=True)
_def("nanmedian", single_dim=True)
_def("mode", single_dim=True)
_def("sort", reduce=False)
_def("argsort", reduce=False)
_def("unbind", single_dim=True)
_def("chunk", dim_offset=1, reduce=False)
_def("cummax", single_dim=True, reduce=False)
_def("cummin", single_dim=True, reduce=False)
_def("cumprod", single_dim=True, reduce=False)
_def("cumprod_", single_dim=True, reduce=False)
_def("cumsum", single_dim=True, reduce=False)
_def("cumsum_", single_dim=True, reduce=False)
_def("logcumsumexp", single_dim=True, reduce=False)
_def("renorm", dim_offset=1, single_dim=True, reduce=False)
_def("softmax", single_dim=True, reduce=False)
softmax = _wrap(torch.nn.functional.softmax, single_dim=True, reduce=False)
# stuff to handle in the future, because they require special
# binding logic for dims
# cross
# diag_embed
# diagonal
# diagonal_scatter
# diff
# nanquantile
# quantile
# roll
# rot90
# topk (new dimes on output)
# should these all be subsumed by inplace indexing?
# index_add_
# index_add
# index_copy
# index_copy_
# index_fill
# index_fill_
# index_select
# scatter
# scatter_
# scatter_add
# scatter_add_
# scatter_reduce
|
import functorch._C
import torch
from functorch._C import dim as _C
from .tree_map import tree_flatten, tree_map
from .wrap_type import wrap_type
_C._patch_tensor_class()
dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists
class DimensionMismatchError(Exception):
pass
class DimensionBindError(Exception):
pass
from . import op_properties
# use dict to avoid writing C++ bindings for set
pointwise = dict.fromkeys(op_properties.pointwise, True)
use_c = True
if not use_c:
from . import reference
class _Tensor:
# fast path around slow wrapping/unwrapping logic for simply queries used
# by the implementation...
@property
def dims(self):
return tuple(d for d in self._levels if isinstance(d, Dim))
def dim(self):
return self.ndim
if use_c:
__torch_function__ = classmethod(_C.__torch_function__)
expand = _C._instancemethod(_C.expand)
else:
__torch_function__ = reference.__torch_function__
expand = reference.expand
index = _C._instancemethod(_C.index)
def __repr__(self):
tensor, levels, ndim = self._tensor, self._levels, self.ndim
return f"{tensor}\nwith dims={tuple(l + ndim if isinstance(l, int) else l for l in levels)} sizes={tuple(tensor.size())}"
TensorLike = (_Tensor, torch.Tensor)
class Dim(_C.Dim, _Tensor):
# note that _C.Dim comes before tensor because we want the Dim API for things like size to take precendence.
# Tensor defines format, but we want to print Dims with special formatting
__format__ = object.__format__
class Tensor(_Tensor, _C.Tensor):
if not use_c:
from_batched = staticmethod(_C.Tensor_from_batched)
from_positional = staticmethod(_C.Tensor_from_positional)
sum = _C._instancemethod(_C.Tensor_sum)
def cat(tensors, dim, new_dim):
n = dims()
return stack(tensors, n, dim).index([n, dim], new_dim)
if use_c:
_wrap = _C._wrap
def _def(name, *args, **kwargs):
orig = getattr(torch.Tensor, name)
setattr(_Tensor, name, _C._instancemethod(_wrap(orig, *args, **kwargs)))
t__getitem__ = _C._instancemethod(_C.__getitem__)
stack = _C.stack
split = _C._instancemethod(_C.split)
else:
_wrap, _def = reference._wrap, reference._def
t__getitem__ = reference.t__getitem__
stack = reference.stack
split = reference.split
# note: there is no python reference
t__setitem__ = _C._instancemethod(_C.__setitem__)
# this is patched in the C API because otherwise torch.Tensor will
# no longer be considered a sequence and things will break
# torch.Tensor.__getitem__ = t__getitem__
_Tensor.__getitem__ = t__getitem__
# torch.Tensor.__setitem__ = t__setitem__
_Tensor.__setitem__ = t__setitem__
torch.Tensor.split = split
_Tensor.split = split
torch.Tensor.expand = _C._instancemethod(_C.expand)
torch.Tensor.index = _C._instancemethod(_C.index)
wrap_type(use_c, _Tensor, torch.Tensor, _Tensor.__torch_function__)
del _Tensor.ndim
if use_c:
_Tensor.order = _C._instancemethod(_C.order)
else:
_Tensor.order = reference.positional
_def("mean")
_def("sum")
_def("all")
_def("amax")
_def("amin")
_def("aminmax")
_def("any")
_def("count_nonzero")
_def("logsumexp")
_def("nanmean")
_def("nansum")
_def("prod")
_def("std", keepdim_offset=2)
_def("var", keepdim_offset=2)
_def("max", single_dim=True)
_def("min", single_dim=True)
_def("argmax", single_dim=True)
_def("argmin", single_dim=True)
_def("kthvalue", single_dim=True)
_def("median", single_dim=True)
_def("nanmedian", single_dim=True)
_def("mode", single_dim=True)
_def("sort", reduce=False)
_def("argsort", reduce=False)
_def("unbind", single_dim=True)
_def("chunk", dim_offset=1, reduce=False)
_def("cummax", single_dim=True, reduce=False)
_def("cummin", single_dim=True, reduce=False)
_def("cumprod", single_dim=True, reduce=False)
_def("cumprod_", single_dim=True, reduce=False)
_def("cumsum", single_dim=True, reduce=False)
_def("cumsum_", single_dim=True, reduce=False)
_def("logcumsumexp", single_dim=True, reduce=False)
_def("renorm", dim_offset=1, single_dim=True, reduce=False)
_def("softmax", single_dim=True, reduce=False)
softmax = _wrap(torch.nn.functional.softmax, single_dim=True, reduce=False)
# stuff to handle in the future, because they require special
# binding logic for dims
# cross
# diag_embed
# diagonal
# diagonal_scatter
# diff
# nanquantile
# quantile
# roll
# rot90
# topk (new dimes on output)
# should these all be subsumed by inplace indexing?
# index_add_
# index_add
# index_copy
# index_copy_
# index_fill
# index_fill_
# index_select
# scatter
# scatter_
# scatter_add
# scatter_add_
# scatter_reduce
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='CenterNet',
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channel=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channel=64,
feat_channel=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
# We fixed the incorrect img_norm_cfg problem in the source code.
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True, color_type='color'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg', 'border'),
keys=['img'])
])
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Use RepeatDataset to speed up training
data = dict(
samples_per_gpu=16,
workers_per_gpu=4,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
# Based on the default settings of modern detectors, the SGD effect is better
# than the Adam in the source code, so we use SGD default settings and
# if you use adam+lr5e-4, the map is 29.1.
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=1.0 / 1000,
step=[18, 24]) # the real step is [18*5, 24*5]
runner = dict(max_epochs=28) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='CenterNet',
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channel=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channel=64,
feat_channel=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
# We fixed the incorrect img_norm_cfg problem in the source code.
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True, color_type='color'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'flip_direction',
'img_norm_cfg', 'border'),
keys=['img'])
])
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Use RepeatDataset to speed up training
data = dict(
samples_per_gpu=16,
workers_per_gpu=4,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
# Based on the default settings of modern detectors, the SGD effect is better
# than the Adam in the source code, so we use SGD default settings and
# if you use adam+lr5e-4, the map is 29.1.
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=1.0 / 1000,
step=[18, 24]) # the real step is [18*5, 24*5]
runner = dict(max_epochs=28) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 2 or more outputs. It measure the
accuracy of the predict class vs. the gold labels.
"""
def __init__(self, sentence_pairs: list[list[str]], labels: list[int], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info(f"Accuracy: {acc * 100:.2f}")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 2 or more outputs. It measure the
accuracy of the predict class vs. the gold labels.
"""
def __init__(self, sentence_pairs: list[list[str]], labels: list[int], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
|
import pytest
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.parametrize(
"text,answer,sources",
[
(
"This Agreement is governed by English law.\nSOURCES: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"This Agreement is governed by English law.\nSources: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"This Agreement is governed by English law.\nsource: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"This Agreement is governed by English law.\nSource: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"According to the sources the agreement is governed by English law.\n"
"Source: 28-pl",
"According to the sources the agreement is governed by English law.\n",
"28-pl",
),
(
"This Agreement is governed by English law.\n"
"SOURCES: 28-pl\n\n"
"QUESTION: Which state/country's law governs the interpretation of the "
"contract?\n"
"FINAL ANSWER: This Agreement is governed by English law.\n"
"SOURCES: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"The president did not mention Michael Jackson in the provided content.\n"
"SOURCES: \n\n"
"Note: Since the content provided does not contain any information about "
"Michael Jackson, there are no sources to cite for this specific question.",
"The president did not mention Michael Jackson in the provided content.\n",
"",
),
# The following text was generated by gpt-3.5-turbo
(
"To diagnose the problem, please answer the following questions and send "
"them in one message to IT:\nA1. Are you connected to the office network? "
"VPN will not work from the office network.\nA2. Are you sure about your "
"login/password?\nA3. Are you using any other VPN (e.g. from a client)?\n"
"A4. When was the last time you used the company VPN?\n"
"SOURCES: 1\n\n"
"ALTERNATIVE OPTION: Another option is to run the VPN in CLI, but keep in "
"mind that DNS settings may not work and there may be a need for manual "
"modification of the local resolver or /etc/hosts and/or ~/.ssh/config "
"files to be able to connect to machines in the company. With the "
"appropriate packages installed, the only thing needed to establish "
"a connection is to run the command:\nsudo openvpn --config config.ovpn"
"\n\nWe will be asked for a username and password - provide the login "
"details, the same ones that have been used so far for VPN connection, "
"connecting to the company's WiFi, or printers (in the Warsaw office)."
"\n\nFinally, just use the VPN connection.\n"
"SOURCES: 2\n\n"
"ALTERNATIVE OPTION (for Windows): Download the"
"OpenVPN client application version 2.6 or newer from the official "
"website: https://openvpn.net/community-downloads/\n"
"SOURCES: 3",
"To diagnose the problem, please answer the following questions and send "
"them in one message to IT:\nA1. Are you connected to the office network? "
"VPN will not work from the office network.\nA2. Are you sure about your "
"login/password?\nA3. Are you using any other VPN (e.g. from a client)?\n"
"A4. When was the last time you used the company VPN?\n",
"1",
),
],
)
def test_spliting_answer_into_answer_and_sources(
text: str,
answer: str,
sources: str,
) -> None:
qa_chain = QAWithSourcesChain.from_llm(FakeLLM())
generated_answer, generated_sources = qa_chain._split_sources(text)
assert generated_answer == answer
assert generated_sources == sources
|
import pytest
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.parametrize(
"text,answer,sources",
[
(
"This Agreement is governed by English law.\nSOURCES: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"This Agreement is governed by English law.\nSources: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"This Agreement is governed by English law.\nsource: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"This Agreement is governed by English law.\nSource: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"According to the sources the agreement is governed by English law.\n"
"Source: 28-pl",
"According to the sources the agreement is governed by English law.\n",
"28-pl",
),
(
"This Agreement is governed by English law.\n"
"SOURCES: 28-pl\n\n"
"QUESTION: Which state/country's law governs the interpretation of the "
"contract?\n"
"FINAL ANSWER: This Agreement is governed by English law.\n"
"SOURCES: 28-pl",
"This Agreement is governed by English law.\n",
"28-pl",
),
(
"The president did not mention Michael Jackson in the provided content.\n"
"SOURCES: \n\n"
"Note: Since the content provided does not contain any information about "
"Michael Jackson, there are no sources to cite for this specific question.",
"The president did not mention Michael Jackson in the provided content.\n",
"",
),
# The following text was generated by gpt-3.5-turbo
(
"To diagnose the problem, please answer the following questions and send "
"them in one message to IT:\nA1. Are you connected to the office network? "
"VPN will not work from the office network.\nA2. Are you sure about your "
"login/password?\nA3. Are you using any other VPN (e.g. from a client)?\n"
"A4. When was the last time you used the company VPN?\n"
"SOURCES: 1\n\n"
"ALTERNATIVE OPTION: Another option is to run the VPN in CLI, but keep in "
"mind that DNS settings may not work and there may be a need for manual "
"modification of the local resolver or /etc/hosts and/or ~/.ssh/config "
"files to be able to connect to machines in the company. With the "
"appropriate packages installed, the only thing needed to establish "
"a connection is to run the command:\nsudo openvpn --config config.ovpn"
"\n\nWe will be asked for a username and password - provide the login "
"details, the same ones that have been used so far for VPN connection, "
"connecting to the company's WiFi, or printers (in the Warsaw office)."
"\n\nFinally, just use the VPN connection.\n"
"SOURCES: 2\n\n"
"ALTERNATIVE OPTION (for Windows): Download the"
"OpenVPN client application version 2.6 or newer from the official "
"website: https://openvpn.net/community-downloads/\n"
"SOURCES: 3",
"To diagnose the problem, please answer the following questions and send "
"them in one message to IT:\nA1. Are you connected to the office network? "
"VPN will not work from the office network.\nA2. Are you sure about your "
"login/password?\nA3. Are you using any other VPN (e.g. from a client)?\n"
"A4. When was the last time you used the company VPN?\n",
"1",
),
],
)
def test_spliting_answer_into_answer_and_sources(
text: str, answer: str, sources: str
) -> None:
qa_chain = QAWithSourcesChain.from_llm(FakeLLM())
generated_answer, generated_sources = qa_chain._split_sources(text)
assert generated_answer == answer
assert generated_sources == sources
|
from torchaudio._internal.module_utils import dropping_support
# Initialize extension and backend first
from . import _extension # noqa # usort: skip
from ._backend import ( # noqa # usort: skip
AudioMetaData,
get_audio_backend,
info,
list_audio_backends,
load as _load,
save as _save,
set_audio_backend,
)
load = dropping_support(_load)
save = dropping_support(_save)
from . import ( # noqa: F401
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
# For BC
from . import backend # noqa # usort: skip
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"AudioMetaData",
"load",
"info",
"save",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
# Initialize extension and backend first
from . import _extension # noqa # usort: skip
from ._backend import ( # noqa # usort: skip
AudioMetaData,
get_audio_backend,
info,
list_audio_backends,
load,
save,
set_audio_backend,
)
from . import ( # noqa: F401
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
# For BC
from . import backend # noqa # usort: skip
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"AudioMetaData",
"load",
"info",
"save",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
_base_ = './faster-rcnn_hrnetv2p-w40-1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
Cane be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that need to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text_url=str(self))
def load_to_bytes(self, timeout: Optional[float] = None) -> bytes:
"""
Load the text file into a bytes object.
EXAMPLE USAGE
.. code-block:: python
from docarray import Document
from docarray.typing import TextUrl
class MyDoc(Document):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt_bytes = doc.remote_url.load_to_bytes()
local_txt_bytes = doc.local_url.load_to_bytes()
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: the text file content as bytes
"""
return _uri_to_blob(self, timeout=timeout)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import Document
from docarray.typing import TextUrl
class MyDoc(Document):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = _uri_to_blob(self, timeout=timeout)
return _bytes.decode(charset)
|
from typing import Optional
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
Cane be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that need to
be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(text_url=str(self))
def load_to_bytes(self, timeout: Optional[float] = None) -> bytes:
"""
Load the text file into a bytes object.
EXAMPLE USAGE
.. code-block:: python
from docarray import Document
from docarray.typing import TextUrl
class MyDoc(Document):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt_bytes = doc.remote_url.load_to_bytes()
local_txt_bytes = doc.local_url.load_to_bytes()
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: the text file content as bytes
"""
return _uri_to_blob(self, timeout=timeout)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import Document
from docarray.typing import TextUrl
class MyDoc(Document):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = _uri_to_blob(self, timeout=timeout)
return _bytes.decode(charset)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Version utils."""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
"""Dataset version MAJOR.MINOR.PATCH.
Args:
version_str (:obj:`str`): Eg: "1.2.3".
description (:obj:`str`): A description of what is new in this version.
Attributes:
version_str (:obj:`str`): Eg: "1.2.3".
description (:obj:`str`): A description of what is new in this version.
major (:obj:`str`):
minor (:obj:`str`):
patch (:obj:`str`):
Example:
```py
>>> VERSION = datasets.Version("1.0.0")
```
"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__(self):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
def __eq__(self, other):
try:
other = self._validate_operand(other)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __hash__(self):
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def from_dict(cls, dic):
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _to_yaml_string(self) -> str:
return self.version_str
def _str_to_version_tuple(version_str):
"""Return the tuple (major, minor, patch) version extracted from the str."""
res = _VERSION_REG.match(version_str)
if not res:
raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
def _version_tuple_to_str(version_tuple):
"""Return the str version from the version tuple (major, minor, patch)."""
return ".".join(str(v) for v in version_tuple)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Version utils."""
import dataclasses
import re
from dataclasses import dataclass
from typing import Optional, Union
_VERSION_TMPL = r"^(?P<major>{v})" r"\.(?P<minor>{v})" r"\.(?P<patch>{v})$"
_VERSION_WILDCARD_REG = re.compile(_VERSION_TMPL.format(v=r"\d+|\*"))
_VERSION_RESOLVED_REG = re.compile(_VERSION_TMPL.format(v=r"\d+"))
@dataclass
class Version:
"""Dataset version MAJOR.MINOR.PATCH.
Args:
version_str (:obj:`str`): Eg: "1.2.3".
description (:obj:`str`): A description of what is new in this version.
Attributes:
version_str (:obj:`str`): Eg: "1.2.3".
description (:obj:`str`): A description of what is new in this version.
major (:obj:`str`):
minor (:obj:`str`):
patch (:obj:`str`):
Example:
```py
>>> VERSION = datasets.Version("1.0.0")
```
"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version(self.version_str)
def __repr__(self):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
def __eq__(self, other):
try:
other = self._validate_operand(other)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __le__(self, other):
other = self._validate_operand(other)
return self.tuple <= other.tuple
def __gt__(self, other):
other = self._validate_operand(other)
return self.tuple > other.tuple
def __ge__(self, other):
other = self._validate_operand(other)
return self.tuple >= other.tuple
def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return major in [self.major, "*"] and minor in [self.minor, "*"] and patch in [self.patch, "*"]
@classmethod
def from_dict(cls, dic):
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _to_yaml_string(self) -> str:
return self.version_str
def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = f"Invalid version '{version_str}'. Format should be x.y.z"
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(v if v == "*" else int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
glu,
tanh,
tanh_shrink,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
glu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
from .store import SupabaseIntegrationCredentialsStore
from .types import APIKeyCredentials, Credentials, OAuth2Credentials
__all__ = [
"SupabaseIntegrationCredentialsStore",
"Credentials",
"APIKeyCredentials",
"OAuth2Credentials",
]
|
from .store import SupabaseIntegrationCredentialsStore
from .types import Credentials, APIKeyCredentials, OAuth2Credentials
__all__ = [
"SupabaseIntegrationCredentialsStore",
"Credentials",
"APIKeyCredentials",
"OAuth2Credentials",
]
|
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.data.execution import ExecutionEventType
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .base_detr import DetectionTransformer
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .base_detr import DetectionTransformer
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer'
]
|
"""Agent components."""
from typing import Any, Callable, Dict, Optional, Set
from llama_index.core.base.query_pipeline.query import (
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.query_pipeline.components.function import (
FnComponent,
get_parameters,
)
# from llama_index.core.query_pipeline.components.input import InputComponent
class BaseStatefulComponent(QueryComponent):
"""Takes in agent inputs and transforms it into desired outputs."""
state: Dict[str, Any] = Field(
default_factory=dict, description="State of the pipeline."
)
def reset_state(self) -> None:
"""Reset state."""
self.state = {}
class StatefulFnComponent(BaseStatefulComponent, FnComponent):
"""
Query component that takes in an arbitrary function.
Stateful version of `FnComponent`. Expects functions to have `state` as the first argument.
"""
def __init__(
self,
fn: Callable,
req_params: Optional[Set[str]] = None,
opt_params: Optional[Set[str]] = None,
state: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
# determine parameters
default_req_params, default_opt_params = get_parameters(fn)
# make sure task and step are part of the list, and remove them from the list
if "state" not in default_req_params:
raise ValueError(
"StatefulFnComponent must have 'state' as required parameters"
)
default_req_params = default_req_params - {"state"}
default_opt_params = default_opt_params - {"state"}
if req_params is None:
req_params = default_req_params
if opt_params is None:
opt_params = default_opt_params
super().__init__(
fn=fn,
req_params=req_params,
opt_params=opt_params,
state=state or {},
**kwargs,
)
def _run_component(self, **kwargs: Any) -> Dict:
"""Run component."""
kwargs.update({"state": self.state})
return super()._run_component(**kwargs)
async def _arun_component(self, **kwargs: Any) -> Any:
"""Async run component."""
kwargs.update({"state": self.state})
return await super()._arun_component(**kwargs)
|
"""Agent components."""
from typing import Any, Callable, Dict, Optional, Set
from llama_index.core.base.query_pipeline.query import (
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.query_pipeline.components.function import (
FnComponent,
get_parameters,
)
# from llama_index.core.query_pipeline.components.input import InputComponent
class BaseStatefulComponent(QueryComponent):
"""Takes in agent inputs and transforms it into desired outputs."""
state: Dict[str, Any] = Field(
default_factory=dict, description="State of the pipeline."
)
def reset_state(self) -> None:
"""Reset state."""
self.state = {}
class StatefulFnComponent(BaseStatefulComponent, FnComponent):
"""
Query component that takes in an arbitrary function.
Stateful version of `FnComponent`. Expects functions to have `state` as the first argument.
"""
def __init__(
self,
fn: Callable,
req_params: Optional[Set[str]] = None,
opt_params: Optional[Set[str]] = None,
state: Optional[Dict[str, Any]] = None,
**kwargs: Any
) -> None:
"""Init params."""
# determine parameters
default_req_params, default_opt_params = get_parameters(fn)
# make sure task and step are part of the list, and remove them from the list
if "state" not in default_req_params:
raise ValueError(
"StatefulFnComponent must have 'state' as required parameters"
)
default_req_params = default_req_params - {"state"}
default_opt_params = default_opt_params - {"state"}
if req_params is None:
req_params = default_req_params
if opt_params is None:
opt_params = default_opt_params
super().__init__(
fn=fn,
req_params=req_params,
opt_params=opt_params,
state=state or {},
**kwargs
)
def _run_component(self, **kwargs: Any) -> Dict:
"""Run component."""
kwargs.update({"state": self.state})
return super()._run_component(**kwargs)
async def _arun_component(self, **kwargs: Any) -> Any:
"""Async run component."""
kwargs.update({"state": self.state})
return await super()._arun_component(**kwargs)
|
import copy
from typing import Any, List, Optional
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.gateways: List[BaseGateway] = []
for port, protocol in zip(self.ports, self.protocols):
gateway_cls = _get_gateway_class(protocol)
# ignore metrics_registry since it is not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ['metrics_registry']
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
gateway_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
gateway = gateway_cls(**gateway_kwargs)
self.gateways.append(gateway)
async def setup_server(self):
"""
setup GRPC server
"""
for gateway in self.gateways:
await gateway.setup_server()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
for gateway in self.gateways:
await gateway.shutdown()
async def run_server(self):
"""Run GRPC server forever"""
for gateway in self.gateways:
await gateway.run_server()
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(gateway.server, 'should_exit', True) for gateway in self.gateways
]
return all(should_exit_values)
|
import copy
from typing import List, Optional
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.gateways: List[BaseGateway] = []
for port, protocol in zip(self.ports, self.protocols):
gateway_cls = _get_gateway_class(protocol)
runtime_args = copy.deepcopy(self.runtime_args)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
gateway_kwargs = copy.deepcopy(kwargs)
gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
gateway = gateway_cls(**gateway_kwargs)
self.gateways.append(gateway)
async def setup_server(self):
"""
setup GRPC server
"""
for gateway in self.gateways:
await gateway.setup_server()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
for gateway in self.gateways:
await gateway.shutdown()
async def run_server(self):
"""Run GRPC server forever"""
for gateway in self.gateways:
await gateway.run_server()
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(gateway.server, 'should_exit', True) for gateway in self.gateways
]
return all(should_exit_values)
|
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAE - Mean Absolute Error Metric"""
from sklearn.metrics import mean_absolute_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Absolute Error (MAE) is the mean of the magnitude of difference between the predicted and actual
values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
Returns:
mae : mean absolute error.
If multioutput is "raw_values", then mean absolute error is returned for each output separately. If multioutput is "uniform_average" or an ndarray of weights, then the weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples:
>>> mae_metric = datasets.load_metric("mae")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mae_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mae': 0.5}
If you're using multi-dimensional lists, then set the config as follows :
>>> mae_metric = datasets.load_metric("mae", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mae_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mae': 0.75}
>>> results = mae_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results)
{'mae': array([0.5, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mae(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(self._get_feature_types()),
reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html"
],
)
def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float")),
"references": datasets.Sequence(datasets.Value("float")),
}
else:
return {
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}
def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average"):
mae_score = mean_absolute_error(references, predictions, sample_weight=sample_weight, multioutput=multioutput)
return {"mae": mae_score}
|
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAE - Mean Absolute Error Metric"""
from sklearn.metrics import mean_absolute_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Absolute Error (MAE) is the mean of the magnitude of difference between the predicted and actual
values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
Returns:
mae : mean absolute error.
If multioutput is "raw_values", then mean absolute error is returned for each output separately. If multioutput is "uniform_average" or an ndarray of weights, then the weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples:
>>> mae_metric = datasets.load_metric("mae")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mae_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mae': 0.5}
If you're using multi-dimensional lists, then set the config as follows :
>>> mae_metric = datasets.load_metric("mae", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mae_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mae': 0.75}
>>> results = mae_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results)
{'mae': array([0.5, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mae(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(self._get_feature_types()),
reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html"
],
)
def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float")),
"references": datasets.Sequence(datasets.Value("float")),
}
else:
return {
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}
def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average"):
mae_score = mean_absolute_error(references, predictions, sample_weight=sample_weight, multioutput=multioutput)
return {"mae": mae_score}
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_train(self, runner) -> None:
"""Synchronize the number of iterations with the runner after resuming
from checkpoints.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
# Calculate eta every `window_size` iterations. Since test and val
# loop will not update runner.iter, use `every_n_innter_iters`to check
# the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
# Calculate average iterative time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_dataloader
else:
cur_dataloader = runner.test_dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_run(self, runner) -> None:
"""Synchronize the number of iterations with the runner.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
# Calculate eta every `window_size` iterations. Since test and val
# loop will not update runner.iter, use `every_n_innter_iters`to check
# the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
# Calculate average iterative time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_dataloader
else:
cur_dataloader = runner.test_dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
|
from .document import DocumentArray
from .storage.memory import StorageMixins
class DocumentArrayInMemory(StorageMixins, DocumentArray):
"""
Default DocumentArray that stores Documents in memory.
With this implementation, :meth:`match` and :meth:`find` perform exact (exhaustive) vector search.
Example usage:
.. code-block:: python
from docarray import Document, DocumentArray
da = DocumentArray(
[Document(text='The cake is a lie'), Document(text='Do a barrel roll!')]
)
da.apply(Document.embed_feature_hashing)
query = Document(text='Can i have some cake?').embed_feature_hashing()
query.match(da, metric='jaccard', use_scipy=True)
print(query.matches[:, ('text', 'scores__jaccard__value')])
.. code-block:: bash
[['The cake is a lie', 'Do a barrel roll!'], [0.9, 1.0]]
.. seealso::
For further details, see our :ref:`user guide <documentarray>`.
"""
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
|
from .document import DocumentArray
from .storage.memory import StorageMixins
class DocumentArrayInMemory(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
|
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
class MyCustomAsyncHandler(AsyncCallbackHandler):
@override
async def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
@override
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
**kwargs: Any,
) -> None:
await asyncio.sleep(0)
@pytest.mark.benchmark
async def test_async_callbacks_in_sync(benchmark: BenchmarkFixture) -> None:
infinite_cycle = cycle([AIMessage(content=" ".join(["hello", "goodbye"] * 5))])
model = GenericFakeChatModel(messages=infinite_cycle)
@benchmark # type: ignore[misc]
def sync_callbacks() -> None:
for _ in range(5):
for _ in model.stream("meow", {"callbacks": [MyCustomAsyncHandler()]}):
pass
|
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
class MyCustomAsyncHandler(AsyncCallbackHandler):
@override
async def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
@override
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
**kwargs: Any,
) -> None:
await asyncio.sleep(0)
@pytest.mark.benchmark
async def test_async_callbacks_in_sync(benchmark: BenchmarkFixture) -> None:
infinite_cycle = cycle([AIMessage(content=" ".join(["hello", "goodbye"] * 500))])
model = GenericFakeChatModel(messages=infinite_cycle)
@benchmark # type: ignore[misc]
def sync_callbacks() -> None:
for _ in range(5):
for _ in model.stream("meow", {"callbacks": [MyCustomAsyncHandler()]}):
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
**Author**: `Moto Hira <[email protected]>`__
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparation
# -----------
#
import librosa
import matplotlib.pyplot as plt
from IPython.display import Audio
from torchaudio.utils import download_asset
######################################################################
# In this tutorial, we will use a speech data from
# `VOiCES dataset <https://iqtlabs.github.io/voices/>`__,
# which is licensed under Creative Commos BY 4.0.
SAMPLE_WAV_SPEECH_PATH = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements :py:func:`torchaudio.transforms.TimeStretch`,
# :py:func:`torchaudio.transforms.TimeMasking` and
# :py:func:`torchaudio.transforms.FrequencyMasking`.
#
######################################################################
# TimeStretch
# -----------
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
spec_12 = stretch(spec, overriding_rate=1.2)
spec_09 = stretch(spec, overriding_rate=0.9)
######################################################################
# Visualization
# ~~~~~~~~~~~~~
def plot():
def plot_spec(ax, spec, title):
ax.set_title(title)
ax.imshow(librosa.amplitude_to_db(spec), origin="lower", aspect="auto")
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True)
plot_spec(axes[0], torch.abs(spec_12[0]), title="Stretched x1.2")
plot_spec(axes[1], torch.abs(spec[0]), title="Original")
plot_spec(axes[2], torch.abs(spec_09[0]), title="Stretched x0.9")
fig.tight_layout()
plot()
######################################################################
# Audio Samples
# ~~~~~~~~~~~~~
def preview(spec, rate=16000):
ispec = T.InverseSpectrogram()
waveform = ispec(spec)
return Audio(waveform[0].numpy().T, rate=rate)
preview(spec)
######################################################################
#
preview(spec_12)
######################################################################
#
preview(spec_09)
######################################################################
# Time and Frequency Masking
# --------------------------
#
torch.random.manual_seed(4)
time_masking = T.TimeMasking(time_mask_param=80)
freq_masking = T.FrequencyMasking(freq_mask_param=80)
spec = get_spectrogram()
time_masked = time_masking(spec)
freq_masked = freq_masking(spec)
######################################################################
#
def plot():
def plot_spec(ax, spec, title):
ax.set_title(title)
ax.imshow(librosa.power_to_db(spec), origin="lower", aspect="auto")
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True)
plot_spec(axes[0], spec[0], title="Original")
plot_spec(axes[1], time_masked[0], title="Masked along time axis")
plot_spec(axes[2], freq_masked[0], title="Masked along frequency axis")
fig.tight_layout()
plot()
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
**Author**: `Moto Hira <[email protected]>`__
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparation
# -----------
#
import librosa
import matplotlib.pyplot as plt
from torchaudio.utils import download_asset
######################################################################
# In this tutorial, we will use a speech data from
# `VOiCES dataset <https://iqtlabs.github.io/voices/>`__,
# which is licensed under Creative Commos BY 4.0.
SAMPLE_WAV_SPEECH_PATH = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spec(ax, spec, title, ylabel="freq_bin"):
ax.set_title(title)
ax.imshow(librosa.power_to_db(spec), origin="lower", aspect="auto")
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements :py:func:`torchaudio.transforms.TimeStretch`,
# :py:func:`torchaudio.transforms.TimeMasking` and
# :py:func:`torchaudio.transforms.FrequencyMasking`.
#
######################################################################
# TimeStretch
# -----------
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
spec_12 = stretch(spec, overriding_rate=1.2)
spec_09 = stretch(spec, overriding_rate=0.9)
######################################################################
#
def plot():
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True)
plot_spec(axes[0], torch.abs(spec_12[0]), title="Stretched x1.2")
plot_spec(axes[1], torch.abs(spec[0]), title="Original")
plot_spec(axes[2], torch.abs(spec_09[0]), title="Stretched x0.9")
fig.tight_layout()
plot()
######################################################################
# Time and Frequency Masking
# --------------------------
#
torch.random.manual_seed(4)
time_masking = T.TimeMasking(time_mask_param=80)
freq_masking = T.FrequencyMasking(freq_mask_param=80)
spec = get_spectrogram()
time_masked = time_masking(spec)
freq_masked = freq_masking(spec)
######################################################################
#
def plot():
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True)
plot_spec(axes[0], spec[0], title="Original")
plot_spec(axes[1], time_masked[0], title="Masked along time axis")
plot_spec(axes[2], freq_masked[0], title="Masked along frequency axis")
fig.tight_layout()
plot()
|
import pytest
from backend.util.service import AppService, expose, get_service_client
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: int) -> int:
return a + b
@expose
def subtract(self, a: int, b: int) -> int:
return a - b
@expose
def fun_with_async(self, a: int, b: int) -> int:
async def add_async(a: int, b: int) -> int:
return a + b
return self.run_and_wait(add_async(a, b))
@pytest.mark.asyncio(loop_scope="session")
async def test_service_creation(server):
with ServiceTest():
client = get_service_client(ServiceTest)
assert client.add(5, 3) == 8
assert client.subtract(10, 4) == 6
assert client.fun_with_async(5, 3) == 8
|
import pytest
from backend.util.service import AppService, expose, get_service_client
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: int) -> int:
return a + b
@expose
def subtract(self, a: int, b: int) -> int:
return a - b
@expose
def fun_with_async(self, a: int, b: int) -> int:
async def add_async(a: int, b: int) -> int:
return a + b
return self.run_and_wait(add_async(a, b))
@pytest.mark.asyncio(scope="session")
async def test_service_creation(server):
with ServiceTest():
client = get_service_client(ServiceTest)
assert client.add(5, 3) == 8
assert client.subtract(10, 4) == 6
assert client.fun_with_async(5, 3) == 8
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled,
for example with mean-pooling.
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer(model_name)
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="cnn", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-cnn")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-cnn')`."
)
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled,
for example with mean-pooling.
"""
import sys
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer(model_name)
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="cnn", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-cnn")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-cnn')`."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
preprocess_cfg=None,
pretrained=None,
init_cfg=None):
super(FCOS, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.keepdims = keepdims
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
self.built = True
def call(self, inputs):
raise NotImplementedError
def compute_output_shape(self, input_shape):
num_spatial_dims = len(input_shape) - 2
if self.data_format == "channels_last":
if self.keepdims:
return (
(input_shape[0],)
+ (1,) * num_spatial_dims
+ (input_shape[-1],)
)
else:
return (input_shape[0],) + (input_shape[-1],)
else:
if self.keepdims:
return (input_shape[0], input_shape[1]) + (
1,
) * num_spatial_dims
else:
return (input_shape[0], input_shape[1])
def get_config(self):
config = super().get_config()
config.update(
{
"data_format": self.data_format,
"keepdims": self.keepdims,
}
)
return config
|
from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.keepdims = keepdims
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
def call(self, inputs):
raise NotImplementedError
def compute_output_shape(self, input_shape):
num_spatial_dims = len(input_shape) - 2
if self.data_format == "channels_last":
if self.keepdims:
return (
(input_shape[0],)
+ (1,) * num_spatial_dims
+ (input_shape[-1],)
)
else:
return (input_shape[0],) + (input_shape[-1],)
else:
if self.keepdims:
return (input_shape[0], input_shape[1]) + (
1,
) * num_spatial_dims
else:
return (input_shape[0], input_shape[1])
def get_config(self):
config = super().get_config()
config.update(
{
"data_format": self.data_format,
"keepdims": self.keepdims,
}
)
return config
|
import torchaudio
_STREAM_READER = [
"StreamReader",
]
_STREAM_WRITER = [
"StreamWriter",
]
_PLAYBACK = [
"play_audio",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER + _PLAYBACK
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not torchaudio._extension._FFMPEG_INITIALIZED:
torchaudio._extension._init_ffmpeg()
if name in _STREAM_READER:
from . import _stream_reader
item = getattr(_stream_reader, name)
elif name in _STREAM_WRITER:
from . import _stream_writer
item = getattr(_stream_writer, name)
elif name in _PLAYBACK:
from . import _playback
item = getattr(_playback, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
import torchaudio
_STREAM_READER = [
"StreamReader",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not torchaudio._extension._FFMPEG_INITIALIZED:
torchaudio._extension._init_ffmpeg()
if name in _STREAM_READER:
from . import _stream_reader
item = getattr(_stream_reader, name)
else:
from . import _stream_writer
item = getattr(_stream_writer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url, hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Hugging face repository"""
root_marker = ""
protocol = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(
self,
repo_info: Optional[DatasetInfo] = None,
token: Optional[str] = None,
**kwargs,
):
"""
The file system can be instantiated using a huggingface_hub.hf_api.DatasetInfo object,
and can be used to list and open files from a Hugging Face dataset repository with fsspec.
Args:
repo_info (:obj:``DatasetInfo``, `optional`):
Dataset repository info from huggingface_hub.HfApi().dataset_info(...)
token (:obj:``str``, `optional`):
Hugging Face token. Will default to the locally saved token if not provided.
"""
super().__init__(self, **kwargs)
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs(self):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d): {"name": str(d), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
}
)
def _open(
self,
path: str,
mode: str = "rb",
**kwargs,
):
if not isinstance(self.repo_info, DatasetInfo):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
return fsspec.open(
url,
mode=mode,
headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
).open()
def info(self, path, **kwargs):
self._get_dirs()
path = self._strip_protocol(path)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path)
def ls(self, path, detail=False, **kwargs):
self._get_dirs()
path = PurePosixPath(path.strip("/"))
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/"))
root = p.parent
if root == path:
paths[str(p)] = f
out = list(paths.values())
if detail:
return out
else:
return list(sorted(f["name"] for f in out))
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url, hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Hugging face repository"""
root_marker = ""
protocol = "hf"
def __init__(
self,
repo_info: Optional[DatasetInfo] = None,
token: Optional[str] = None,
**kwargs,
):
"""
The file system can be instantiated using a huggingface_hub.hf_api.DatasetInfo object,
and can be used to list and open files from a Hugging Face dataset repository with fsspec.
Args:
repo_info (:obj:``DatasetInfo``, `optional`):
Dataset repository info from huggingface_hub.HfApi().dataset_info(...)
token (:obj:``str``, `optional`):
Hugging Face token. Will default to the locally saved token if not provided.
"""
super().__init__(self, **kwargs)
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs(self):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d): {"name": str(d), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
}
)
def _open(
self,
path: str,
mode: str = "rb",
**kwargs,
):
if not isinstance(self.repo_info, DatasetInfo):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
return fsspec.open(
url,
mode=mode,
headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
).open()
def info(self, path, **kwargs):
self._get_dirs()
path = self._strip_protocol(path)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path)
def ls(self, path, detail=False, **kwargs):
self._get_dirs()
path = PurePosixPath(path.strip("/"))
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/"))
root = p.parent
if root == path:
paths[str(p)] = f
out = list(paths.values())
if detail:
return out
else:
return list(sorted(f["name"] for f in out))
|
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, TransformersTokenizerWrapper, WordTokenizer
__all__ = [
"WordTokenizer",
"WhitespaceTokenizer",
"PhraseTokenizer",
"ENGLISH_STOP_WORDS",
"TransformersTokenizerWrapper",
]
|
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import Embedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
)
__all__ = [
'AudioNdArray',
'NdArray',
'Embedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'AnyUrl',
'ID',
'AnyTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__.extend(['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor'])
|
from docarray.typing.id import ID
from docarray.typing.tensor.embedding.embedding import Embedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.url import AnyUrl, ImageUrl, Mesh3DUrl, PointCloud3DUrl, TextUrl
__all__ = [
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'AnyUrl',
'ID',
'AnyTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor'])
|
from ._vggish import VGGISH, VGGishBundle
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
"VGGISH",
"VGGishBundle",
]
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
]
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
]
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .squim_pipeline import SQUIM_OBJECTIVE, SQUIM_SUBJECTIVE, SquimObjectiveBundle, SquimSubjectiveBundle
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
"SQUIM_OBJECTIVE",
"SQUIM_SUBJECTIVE",
"SquimObjectiveBundle",
"SquimSubjectiveBundle",
]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, TypeVar, Optional, Type
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class BaseNode(ABC):
"""
A DocumentNode is an object than can be nested inside a Document.
A Document itself is a DocumentNode as well as prebuilt type
"""
_proto_type_name: Optional[str] = None
@abstractmethod
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert itself into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
def _docarray_to_json_compatible(self):
"""
Convert itself into a json compatible object
"""
...
|
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, TypeVar, Optional, Type
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class BaseNode(ABC):
"""
A DocumentNode is an object than can be nested inside a Document.
A Document itself is a DocumentNode as well as prebuilt type
"""
_proto_type_name: Optional[str] = None
@abstractmethod
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert itself into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
def _docarray_to_json_compatible(self):
"""
Convert itself into a json compatible object
"""
...
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory, get_rabit_args
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker, is_qdm: bool
) -> None:
workers = tm.get_client_workers(client)
n_workers = len(workers)
args = await get_rabit_args(client, n_workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cpu",
comm_args=args,
is_qdm=is_qdm,
)
await client.gather(futs)
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker, is_qdm: bool
) -> None:
workers = tm.get_client_workers(client)
args = await client.sync(
dxgb._get_rabit_args,
len(workers),
None,
client,
)
n_workers = len(workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cpu",
comm_args=args,
is_qdm=is_qdm,
)
await client.gather(futs)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
def test_find_backend(self):
simple_backend = find_backend(" if not is_torch_available():")
self.assertEqual(simple_backend, "torch")
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
self.assertEqual(double_backend, "torch_and_transformers")
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
triple_backend = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
)
self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
def test_read_init(self):
objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch", objects)
self.assertIn("torch_and_transformers", objects)
self.assertIn("flax_and_transformers", objects)
self.assertIn("torch_and_transformers_and_onnx", objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel", objects["torch"])
self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
def test_create_dummy_object(self):
dummy_constant = create_dummy_object("CONSTANT", "'torch'")
self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
dummy_function = create_dummy_object("function", "'torch'")
self.assertEqual(
dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n"
)
expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
dummy_class = create_dummy_object("FakeClass", "'torch'")
self.assertEqual(dummy_class, expected_dummy_class)
def test_create_dummy_files(self):
expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
"""
dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
def test_find_backend(self):
simple_backend = find_backend(" if not is_torch_available():")
self.assertEqual(simple_backend, "torch")
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
self.assertEqual(double_backend, "torch_and_transformers")
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
triple_backend = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
)
self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
def test_read_init(self):
objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch", objects)
self.assertIn("torch_and_transformers", objects)
self.assertIn("flax_and_transformers", objects)
self.assertIn("torch_and_transformers_and_onnx", objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel", objects["torch"])
self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
def test_create_dummy_object(self):
dummy_constant = create_dummy_object("CONSTANT", "'torch'")
self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
dummy_function = create_dummy_object("function", "'torch'")
self.assertEqual(
dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n"
)
expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
dummy_class = create_dummy_object("FakeClass", "'torch'")
self.assertEqual(dummy_class, expected_dummy_class)
def test_create_dummy_files(self):
expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
"""
dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type={{_base_.dataset_type}},
data_root={{_base_.data_root}},
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}})))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type={{_base_.dataset_type}},
data_root={{_base_.data_root}},
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal, is_simple_tensor
@_register_explicit_noop(datapoints.Mask, datapoints.BoundingBoxes, warn_passthrough=True)
def erase(
inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]:
if not torch.jit.is_scripting():
_log_api_usage_once(erase)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Datapoint):
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, any TorchVision datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]:
if not torch.jit.is_scripting():
_log_api_usage_once(erase)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Image.wrap_like(inpt, output)
elif isinstance(inpt, datapoints.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import (
UpstashRedisChatMessageHistory,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UpstashRedisChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import (
UpstashRedisChatMessageHistory,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UpstashRedisChatMessageHistory",
]
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.utils._internal.misc import is_jax_available
if is_jax_available():
import jax.numpy as jnp
from jax import jit
from docarray.typing import JaxArray
@pytest.mark.jax
def test_basic_jax_operation():
def basic_jax_fn(x):
return jnp.sum(x)
def abstract_JaxArray(array: 'JaxArray') -> jnp.ndarray:
return array.tensor
class Mmdoc(BaseDoc):
tensor: Optional[JaxArray[3, 224, 224]] = None
N = 10
batch = DocList[Mmdoc](Mmdoc() for _ in range(N))
batch.tensor = jnp.zeros((N, 3, 224, 224))
batch = batch.to_doc_vec()
jax_fn = jit(basic_jax_fn)
result = jax_fn(abstract_JaxArray(batch.tensor))
assert (
result == 0.0
) # checking if the sum of the tensor data is zero as initialized
|
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.utils._internal.misc import is_jax_available
if is_jax_available():
import jax.numpy as jnp
from jax import jit
from docarray.typing import JaxArray
@pytest.mark.jax
def test_basic_jax_operation():
def basic_jax_fn(x):
return jnp.sum(x)
def abstract_JaxArray(array: 'JaxArray') -> jnp.ndarray:
return array.tensor
class Mmdoc(BaseDoc):
tensor: Optional[JaxArray[3, 224, 224]]
N = 10
batch = DocList[Mmdoc](Mmdoc() for _ in range(N))
batch.tensor = jnp.zeros((N, 3, 224, 224))
batch = batch.to_doc_vec()
jax_fn = jit(basic_jax_fn)
result = jax_fn(abstract_JaxArray(batch.tensor))
assert (
result == 0.0
) # checking if the sum of the tensor data is zero as initialized
|
import os
import pathlib
import pytest
from docarray.helper import (
protocol_and_compress_from_file_path,
add_protocol_and_compress_to_file_path,
filter_dict,
get_full_version,
_safe_cast_int,
)
@pytest.mark.parametrize(
'file_path', ['doc_array', '../docarray', './a_folder/docarray']
)
@pytest.mark.parametrize(
'protocol', ['protobuf', 'protobuf-array', 'pickle', 'pickle-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_protocol_and_compress_from_file_path(file_path, protocol, compress):
file_path_extended = file_path
if protocol:
file_path_extended += '.' + protocol
if compress:
file_path_extended += '.' + compress
_protocol, _compress = protocol_and_compress_from_file_path(file_path_extended)
assert _protocol in {'protobuf', 'protobuf-array', 'pickle', 'pickle-array', None}
assert _compress in {'lz4', 'bz2', 'lzma', 'zlib', 'gzip', None}
assert protocol == _protocol
assert compress == _compress
@pytest.mark.parametrize('file_path', ['doc_array', './some_folder/doc_array'])
@pytest.mark.parametrize(
'protocol', ['protobuf', 'protobuf-array', 'pickle', 'pickle-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip'])
def test_add_protocol_and_compress_to_file_path(file_path, compress, protocol):
file_path_extended = add_protocol_and_compress_to_file_path(
file_path, compress, protocol
)
file_path_suffixes = [
e.replace('.', '') for e in pathlib.Path(file_path_extended).suffixes
]
if compress:
assert compress in file_path_suffixes
if protocol:
assert protocol in file_path_suffixes
def test_filter_dict():
conf_dict = {'x': 0, 'y': 1, 'z': None, 'k': ''}
assert list(filter_dict(conf_dict).keys()) == ['x', 'y', 'k']
def test_ci_vendor():
if 'GITHUB_WORKFLOW' in os.environ:
assert get_full_version()['ci-vendor'] == 'GITHUB_ACTIONS'
@pytest.mark.parametrize('input,output', [(1, 1), (1.0, 1), ('1', 1)])
def test_safe_cast(input, output):
assert output == _safe_cast_int(input)
@pytest.mark.parametrize('wrong_input', [1.5, 1.001, 2 / 3])
def test_safe_cast_raise_error(wrong_input):
with pytest.raises(ValueError):
_safe_cast_int(wrong_input)
|
import os
import pathlib
import pytest
from docarray.helper import (
protocol_and_compress_from_file_path,
add_protocol_and_compress_to_file_path,
filter_dict,
get_full_version,
)
@pytest.mark.parametrize(
'file_path', ['doc_array', '../docarray', './a_folder/docarray']
)
@pytest.mark.parametrize(
'protocol', ['protobuf', 'protobuf-array', 'pickle', 'pickle-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_protocol_and_compress_from_file_path(file_path, protocol, compress):
file_path_extended = file_path
if protocol:
file_path_extended += '.' + protocol
if compress:
file_path_extended += '.' + compress
_protocol, _compress = protocol_and_compress_from_file_path(file_path_extended)
assert _protocol in {'protobuf', 'protobuf-array', 'pickle', 'pickle-array', None}
assert _compress in {'lz4', 'bz2', 'lzma', 'zlib', 'gzip', None}
assert protocol == _protocol
assert compress == _compress
@pytest.mark.parametrize('file_path', ['doc_array', './some_folder/doc_array'])
@pytest.mark.parametrize(
'protocol', ['protobuf', 'protobuf-array', 'pickle', 'pickle-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip'])
def test_add_protocol_and_compress_to_file_path(file_path, compress, protocol):
file_path_extended = add_protocol_and_compress_to_file_path(
file_path, compress, protocol
)
file_path_suffixes = [
e.replace('.', '') for e in pathlib.Path(file_path_extended).suffixes
]
if compress:
assert compress in file_path_suffixes
if protocol:
assert protocol in file_path_suffixes
def test_filter_dict():
conf_dict = {'x': 0, 'y': 1, 'z': None, 'k': ''}
assert list(filter_dict(conf_dict).keys()) == ['x', 'y', 'k']
def test_ci_vendor():
if 'GITHUB_WORKFLOW' in os.environ:
assert get_full_version()['ci-vendor'] == 'GITHUB_ACTIONS'
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import is_installed
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .base import BaseOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
from .zero_optimizer import ZeroRedundancyOptimizer
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'ApexOptimWrapper', 'OptimWrapperDict',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper'
]
if is_installed('deepspeed'):
from ._deepspeed import DeepSpeedOptimWrapper # noqa:F401
__all__.append('DeepSpeedOptimWrapper')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
from .zero_optimizer import ZeroRedundancyOptimizer
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'ApexOptimWrapper', 'OptimWrapperDict',
'ZeroRedundancyOptimizer'
]
|
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Gateway YAML parser given the syntax version number
"""
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
for key in {
'name',
'port',
'protocol',
'host',
'tracing',
'graph_description',
'graph_conditions',
'deployments_addresses',
'deployments_metadata',
'deployments_no_reduce',
'timeout_send',
'retries',
'compression',
'runtime_name',
'prefetch',
'meter',
'log_config',
}:
if runtime_args and not runtime_args.get(key) and data.get(key):
runtime_args[key] = data.get(key)
if runtime_args.get('default_port'):
yaml_port = data.get('port')
if isinstance(yaml_port, int):
yaml_port = [yaml_port]
runtime_args['port'] = yaml_port or runtime_args.get('port')
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
req_handler_cls=GatewayRequestHandler
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
:return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
|
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Gateway YAML parser given the syntax version number
"""
from jina.logging.predefined import default_logger
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
for key in {
'name',
'port',
'protocol',
'host',
'tracing',
'graph_description',
'graph_conditions',
'deployments_addresses',
'deployments_metadata',
'deployments_no_reduce',
'timeout_send',
'retries',
'compression',
'runtime_name',
'prefetch',
'meter',
'log_config',
}:
if runtime_args and not runtime_args.get(key) and data.get(key):
runtime_args[key] = data.get(key)
if runtime_args.get('default_port'):
yaml_port = data.get('port')
if isinstance(yaml_port, int):
yaml_port = [yaml_port]
runtime_args['port'] = yaml_port or runtime_args.get('port')
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
req_handler_cls=GatewayRequestHandler
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
:return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
|
"""monday.com reader."""
from typing import Dict, List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MondayReader(BaseReader):
"""
monday.com reader. Reads board's data by a GraphQL query.
Args:
api_key (str): monday.com API key.
"""
def __init__(self, api_key: str) -> None:
"""Initialize monday.com reader."""
self.api_key = api_key
self.api_url = "https://api.monday.com/v2"
def _parse_item_values(self, cv) -> Dict[str, str]:
data = {}
data["title"] = cv["title"]
data["value"] = cv["text"]
return data
def _parse_data(self, item) -> Dict[str, str]:
data = {}
data["id"] = item["id"]
data["name"] = item["name"]
data["values"] = list(map(self._parse_item_values, list(item["column_values"])))
return data
def _perform_request(self, board_id) -> Dict[str, str]:
headers = {"Authorization": self.api_key}
query = """
query{
boards(ids: [%d]){
name,
items{
id,
name,
column_values{
title,
text
}
}
}
} """ % (board_id)
data = {"query": query}
response = requests.post(url=self.api_url, json=data, headers=headers)
return response.json()
def load_data(self, board_id: int) -> List[Document]:
"""
Load board data by board_id.
Args:
board_id (int): monday.com board id.
Returns:
List[Document]: List of items as documents.
[{id, name, values: [{title, value}]}]
"""
json_response = self._perform_request(board_id)
board_data = json_response["data"]["boards"][0]
board_data["name"]
items_array = list(board_data["items"])
parsed_items = list(map(self._parse_data, list(items_array)))
result = []
for item in parsed_items:
text = f"name: {item['name']}"
for item_value in item["values"]:
if item_value["value"]:
text += f", {item_value['title']}: {item_value['value']}"
result.append(
Document(
text=text, extra_info={"board_id": board_id, "item_id": item["id"]}
)
)
return result
if __name__ == "__main__":
reader = MondayReader("api_key")
print(reader.load_data(12345))
|
"""monday.com reader."""
from typing import Dict, List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MondayReader(BaseReader):
"""
monday.com reader. Reads board's data by a GraphQL query.
Args:
api_key (str): monday.com API key.
"""
def __init__(self, api_key: str) -> None:
"""Initialize monday.com reader."""
self.api_key = api_key
self.api_url = "https://api.monday.com/v2"
def _parse_item_values(self, cv) -> Dict[str, str]:
data = {}
data["title"] = cv["title"]
data["value"] = cv["text"]
return data
def _parse_data(self, item) -> Dict[str, str]:
data = {}
data["id"] = item["id"]
data["name"] = item["name"]
data["values"] = list(map(self._parse_item_values, list(item["column_values"])))
return data
def _perform_request(self, board_id) -> Dict[str, str]:
headers = {"Authorization": self.api_key}
query = """
query{
boards(ids: [%d]){
name,
items{
id,
name,
column_values{
title,
text
}
}
}
} """ % (
board_id
)
data = {"query": query}
response = requests.post(url=self.api_url, json=data, headers=headers)
return response.json()
def load_data(self, board_id: int) -> List[Document]:
"""
Load board data by board_id.
Args:
board_id (int): monday.com board id.
Returns:
List[Document]: List of items as documents.
[{id, name, values: [{title, value}]}]
"""
json_response = self._perform_request(board_id)
board_data = json_response["data"]["boards"][0]
board_data["name"]
items_array = list(board_data["items"])
parsed_items = list(map(self._parse_data, list(items_array)))
result = []
for item in parsed_items:
text = f"name: {item['name']}"
for item_value in item["values"]:
if item_value["value"]:
text += f", {item_value['title']}: {item_value['value']}"
result.append(
Document(
text=text, extra_info={"board_id": board_id, "item_id": item["id"]}
)
)
return result
if __name__ == "__main__":
reader = MondayReader("api_key")
print(reader.load_data(12345))
|
"""Methods for scaling, centering, normalization, binarization, and more."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._data import (
Binarizer,
KernelCenterer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PowerTransformer,
QuantileTransformer,
RobustScaler,
StandardScaler,
add_dummy_feature,
binarize,
maxabs_scale,
minmax_scale,
normalize,
power_transform,
quantile_transform,
robust_scale,
scale,
)
from ._discretization import KBinsDiscretizer
from ._encoders import OneHotEncoder, OrdinalEncoder
from ._function_transformer import FunctionTransformer
from ._label import LabelBinarizer, LabelEncoder, MultiLabelBinarizer, label_binarize
from ._polynomial import PolynomialFeatures, SplineTransformer
from ._target_encoder import TargetEncoder
__all__ = [
"Binarizer",
"FunctionTransformer",
"KBinsDiscretizer",
"KernelCenterer",
"LabelBinarizer",
"LabelEncoder",
"MaxAbsScaler",
"MinMaxScaler",
"MultiLabelBinarizer",
"Normalizer",
"OneHotEncoder",
"OrdinalEncoder",
"PolynomialFeatures",
"PowerTransformer",
"QuantileTransformer",
"RobustScaler",
"SplineTransformer",
"StandardScaler",
"TargetEncoder",
"add_dummy_feature",
"binarize",
"label_binarize",
"maxabs_scale",
"minmax_scale",
"normalize",
"power_transform",
"quantile_transform",
"robust_scale",
"scale",
]
|
"""Methods for scaling, centering, normalization, binarization, and more."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._data import (
Binarizer,
KernelCenterer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PowerTransformer,
QuantileTransformer,
RobustScaler,
StandardScaler,
add_dummy_feature,
binarize,
maxabs_scale,
minmax_scale,
normalize,
power_transform,
quantile_transform,
robust_scale,
scale,
)
from ._discretization import KBinsDiscretizer
from ._encoders import OneHotEncoder, OrdinalEncoder
from ._function_transformer import FunctionTransformer
from ._label import LabelBinarizer, LabelEncoder, MultiLabelBinarizer, label_binarize
from ._polynomial import PolynomialFeatures, SplineTransformer
from ._target_encoder import TargetEncoder
__all__ = [
"Binarizer",
"FunctionTransformer",
"KBinsDiscretizer",
"KernelCenterer",
"LabelBinarizer",
"LabelEncoder",
"MultiLabelBinarizer",
"MinMaxScaler",
"MaxAbsScaler",
"QuantileTransformer",
"Normalizer",
"OneHotEncoder",
"OrdinalEncoder",
"PowerTransformer",
"RobustScaler",
"SplineTransformer",
"StandardScaler",
"TargetEncoder",
"add_dummy_feature",
"PolynomialFeatures",
"binarize",
"normalize",
"scale",
"robust_scale",
"maxabs_scale",
"minmax_scale",
"label_binarize",
"quantile_transform",
"power_transform",
]
|
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> "CLIPModel":
return CLIPModel(model_name=input_path)
|
from typing import Dict, Union
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
def forward(self, features: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: Union[str, bool] = True) -> Dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> "CLIPModel":
return CLIPModel(model_name=input_path)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.data_elements.mask import BitmapMasks
from mmdet.models.dense_heads import SOLOV2Head
def _rand_masks(num_items, bboxes, img_w, img_h):
rng = np.random.RandomState(0)
masks = np.zeros((num_items, img_h, img_w))
for i, bbox in enumerate(bboxes):
bbox = bbox.astype(np.int32)
mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
0.3).astype(np.int)
masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
return BitmapMasks(masks, height=img_h, width=img_w)
def _fake_mask_feature_head():
mask_feature_head = ConfigDict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
return mask_feature_head
class TestSOLOv2Head(TestCase):
def test_solov2_head_loss(self):
"""Tests mask head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}]
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
# SOLO head expects a multiple levels of features per image
feats = []
for i in range(len(mask_head.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
feats = tuple(feats)
mask_outs = mask_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty(0, 4)
gt_instances.labels = torch.LongTensor([])
gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
empty_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_mask_loss = empty_gt_losses['loss_mask']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_mask_loss.item(), 0,
'there should be no mask loss when there are no true mask')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
one_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_mask_loss = one_gt_losses['loss_mask']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_mask_loss.item(), 0,
'mask loss should be non-zero')
def test_solov2_head_empty_result(self):
s = 256
img_metas = {
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
kernel_preds = torch.empty(0, 128)
cls_scores = torch.empty(0, 80)
mask_feats = torch.empty(0, 16, 16)
test_cfg = ConfigDict(
score_thr=0.1,
mask_thr=0.5,
)
results = mask_head._predict_by_feat_single(
kernel_preds=kernel_preds,
cls_scores=cls_scores,
mask_feats=mask_feats,
img_meta=img_metas,
cfg=test_cfg)
self.assertIsInstance(results, InstanceData)
self.assertEqual(len(results), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.core.mask import BitmapMasks
from mmdet.models.dense_heads import SOLOV2Head
def _rand_masks(num_items, bboxes, img_w, img_h):
rng = np.random.RandomState(0)
masks = np.zeros((num_items, img_h, img_w))
for i, bbox in enumerate(bboxes):
bbox = bbox.astype(np.int32)
mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
0.3).astype(np.int)
masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
return BitmapMasks(masks, height=img_h, width=img_w)
def _fake_mask_feature_head():
mask_feature_head = ConfigDict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
return mask_feature_head
class TestSOLOv2Head(TestCase):
def test_solov2_head_loss(self):
"""Tests mask head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}]
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
# SOLO head expects a multiple levels of features per image
feats = []
for i in range(len(mask_head.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
feats = tuple(feats)
mask_outs = mask_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty(0, 4)
gt_instances.labels = torch.LongTensor([])
gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
empty_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_mask_loss = empty_gt_losses['loss_mask']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_mask_loss.item(), 0,
'there should be no mask loss when there are no true mask')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
one_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_mask_loss = one_gt_losses['loss_mask']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_mask_loss.item(), 0,
'mask loss should be non-zero')
def test_solov2_head_empty_result(self):
s = 256
img_metas = {
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
kernel_preds = torch.empty(0, 128)
cls_scores = torch.empty(0, 80)
mask_feats = torch.empty(0, 16, 16)
test_cfg = ConfigDict(
score_thr=0.1,
mask_thr=0.5,
)
results = mask_head._predict_by_feat_single(
kernel_preds=kernel_preds,
cls_scores=cls_scores,
mask_feats=mask_feats,
img_meta=img_metas,
cfg=test_cfg)
self.assertIsInstance(results, InstanceData)
self.assertEqual(len(results), 0)
|
"""
Example of training survival model with Dask on CPU
===================================================
"""
import os
import dask.array as da
import dask.dataframe as dd
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client: Client) -> da.Array:
# Load an example survival data from CSV into a Dask data frame.
# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
CURRENT_DIR = os.path.dirname(__file__)
df = dd.read_csv(
os.path.join(CURRENT_DIR, os.pardir, "data", "veterans_lung_cancer.csv")
)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
# For AFT survival, you'd need to extract the lower and upper bounds for the label
# and pass them as arguments to DaskDMatrix.
y_lower_bound = df["Survival_label_lower_bound"]
y_upper_bound = df["Survival_label_upper_bound"]
X = df.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"], axis=1)
dtrain = DaskDMatrix(
client, X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound
)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
params = {
"verbosity": 1,
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"learning_rate": 0.05,
"aft_loss_distribution_scale": 1.20,
"aft_loss_distribution": "normal",
"max_depth": 6,
"lambda": 0.01,
"alpha": 0.02,
}
output = dxgb.train(
client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")]
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history: ", history)
# Uncomment the following line to save the model to the disk
# bst.save_model('survival_model.json')
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Example of training survival model with Dask on CPU
===================================================
"""
import os
import dask.dataframe as dd
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# Load an example survival data from CSV into a Dask data frame.
# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
CURRENT_DIR = os.path.dirname(__file__)
df = dd.read_csv(
os.path.join(CURRENT_DIR, os.pardir, "data", "veterans_lung_cancer.csv")
)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
# For AFT survival, you'd need to extract the lower and upper bounds for the label
# and pass them as arguments to DaskDMatrix.
y_lower_bound = df["Survival_label_lower_bound"]
y_upper_bound = df["Survival_label_upper_bound"]
X = df.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"], axis=1)
dtrain = DaskDMatrix(
client, X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound
)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
params = {
"verbosity": 1,
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"learning_rate": 0.05,
"aft_loss_distribution_scale": 1.20,
"aft_loss_distribution": "normal",
"max_depth": 6,
"lambda": 0.01,
"alpha": 0.02,
}
output = dxgb.train(
client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")]
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history: ", history)
# Uncomment the following line to save the model to the disk
# bst.save_model('survival_model.json')
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayElastic``"""
MAX_ES_RETURNED_DOCS = 10000
def _document_to_elastic(self, doc: 'Document') -> Dict:
extra_columns = {
col: doc.tags.get(col) for col, _ in self._config.columns.items()
}
request = {
'_op_type': 'index',
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if self._config.tag_indices:
for index in self._config.tag_indices:
request[index] = doc.tags.get(index)
if doc.text:
request['text'] = doc.text
return request
def _getitem(self, doc_id: str) -> 'Document':
"""Helper method for getting item with elastic as storage
:param doc_id: id of the document
:raises KeyError: raise error when elastic id does not exist in storage
:return: Document
"""
try:
result = self._client.get(index=self._config.index_name, id=doc_id)
doc = Document.from_base64(result['_source']['blob'])
return doc
except Exception as ex:
raise KeyError(doc_id) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from elastic
"""
return self._getitem(_id)
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
# Handle if doc len is more than MAX_ES_RETURNED_DOCS
for pos in range(0, len(ids), self.MAX_ES_RETURNED_DOCS):
es_docs = self._client.mget(
index=self._config.index_name,
ids=ids[pos : pos + self.MAX_ES_RETURNED_DOCS],
)['docs']
for doc in es_docs:
if doc['found']:
accumulated_docs.append(
Document.from_base64(doc['_source']['blob'])
)
else:
accumulated_docs_id_not_found.append(doc['_id'])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
request = [self._document_to_elastic(value)]
self._send_requests(request)
self._refresh(self._config.index_name)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
self._refresh(self._config.index_name)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(index=self._config.index_name, id=_id)
self._refresh(self._config.index_name)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
self._client.indices.delete(index=self._config.index_name)
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
|
from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayElastic``"""
MAX_ES_RETURNED_DOCS = 10000
def _document_to_elastic(self, doc: 'Document') -> Dict:
extra_columns = {col: doc.tags.get(col) for col, _ in self._config.columns}
request = {
'_op_type': 'index',
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if self._config.tag_indices:
for index in self._config.tag_indices:
request[index] = doc.tags.get(index)
if doc.text:
request['text'] = doc.text
return request
def _getitem(self, doc_id: str) -> 'Document':
"""Helper method for getting item with elastic as storage
:param doc_id: id of the document
:raises KeyError: raise error when elastic id does not exist in storage
:return: Document
"""
try:
result = self._client.get(index=self._config.index_name, id=doc_id)
doc = Document.from_base64(result['_source']['blob'])
return doc
except Exception as ex:
raise KeyError(doc_id) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from elastic
"""
return self._getitem(_id)
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
# Handle if doc len is more than MAX_ES_RETURNED_DOCS
for pos in range(0, len(ids), self.MAX_ES_RETURNED_DOCS):
es_docs = self._client.mget(
index=self._config.index_name,
ids=ids[pos : pos + self.MAX_ES_RETURNED_DOCS],
)['docs']
for doc in es_docs:
if doc['found']:
accumulated_docs.append(
Document.from_base64(doc['_source']['blob'])
)
else:
accumulated_docs_id_not_found.append(doc['_id'])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
request = [self._document_to_elastic(value)]
self._send_requests(request)
self._refresh(self._config.index_name)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
self._refresh(self._config.index_name)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(index=self._config.index_name, id=_id)
self._refresh(self._config.index_name)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
self._client.indices.delete(index=self._config.index_name)
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
|
from abc import ABC
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
class BaseBackendMixin(ABC):
TYPE_MAP: Dict
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
self._load_offset2ids()
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from ....math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type]
|
from abc import ABC
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
class BaseBackendMixin(ABC):
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
self._load_offset2ids()
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from ....math.ndarray import to_numpy_array
return to_numpy_array(embedding)
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field, SecretStr
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearch(BaseTool): # type: ignore[override]
"""Tool that queries the BraveSearch.
Api key can be provided as an environment variable BRAVE_SEARCH_API_KEY
or as a parameter.
Example usages:
.. code-block:: python
# uses BRAVE_SEARCH_API_KEY from environment
tool = BraveSearch()
.. code-block:: python
# uses the provided api key
tool = BraveSearch.from_api_key("your-api-key")
.. code-block:: python
# uses the provided api key and search kwargs
tool = BraveSearch.from_api_key(
api_key = "your-api-key",
search_kwargs={"max_results": 5}
)
.. code-block:: python
# uses BRAVE_SEARCH_API_KEY from environment
tool = BraveSearch.from_search_kwargs({"max_results": 5})
"""
name: str = "brave_search"
description: str = (
"a search engine. "
"useful for when you need to answer questions about current events."
" input should be a search query."
)
search_wrapper: BraveSearchWrapper = Field(default_factory=BraveSearchWrapper)
@classmethod
def from_api_key(
cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
) -> BraveSearch:
"""Create a tool from an api key.
Args:
api_key: The api key to use.
search_kwargs: Any additional kwargs to pass to the search wrapper.
**kwargs: Any additional kwargs to pass to the tool.
Returns:
A tool.
"""
wrapper = BraveSearchWrapper(
api_key=SecretStr(api_key), search_kwargs=search_kwargs or {}
)
return cls(search_wrapper=wrapper, **kwargs)
@classmethod
def from_search_kwargs(cls, search_kwargs: dict, **kwargs: Any) -> BraveSearch:
"""Create a tool from search kwargs.
Uses the environment variable BRAVE_SEARCH_API_KEY for api key.
Args:
search_kwargs: Any additional kwargs to pass to the search wrapper.
**kwargs: Any additional kwargs to pass to the tool.
Returns:
A tool.
"""
# we can not provide api key because it's calculated in the wrapper,
# so the ignore is needed for linter
# not ideal but needed to keep the tool code changes non-breaking
wrapper = BraveSearchWrapper(search_kwargs=search_kwargs)
return cls(search_wrapper=wrapper, **kwargs)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.search_wrapper.run(query)
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearch(BaseTool): # type: ignore[override]
"""Tool that queries the BraveSearch."""
name: str = "brave_search"
description: str = (
"a search engine. "
"useful for when you need to answer questions about current events."
" input should be a search query."
)
search_wrapper: BraveSearchWrapper
@classmethod
def from_api_key(
cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
) -> BraveSearch:
"""Create a tool from an api key.
Args:
api_key: The api key to use.
search_kwargs: Any additional kwargs to pass to the search wrapper.
**kwargs: Any additional kwargs to pass to the tool.
Returns:
A tool.
"""
wrapper = BraveSearchWrapper(api_key=api_key, search_kwargs=search_kwargs or {})
return cls(search_wrapper=wrapper, **kwargs)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.search_wrapper.run(query)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return Config().database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return Config().database_api_port
@expose
def send_execution_update(self, execution_result_dict: dict[Any, Any]):
self.event_queue.publish(ExecutionResult(**execution_result_dict))
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import (AdaptiveAvgPool2d, FrozenBatchNorm2d,
adaptive_avg_pool2d)
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding,
SinePositionalEncoding3D)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder, DDQTransformerDecoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'DDQTransformerDecoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'SinePositionalEncoding3D', 'FrozenBatchNorm2d'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding,
SinePositionalEncoding3D)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder, DDQTransformerDecoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'DDQTransformerDecoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'SinePositionalEncoding3D'
]
|
import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
async def test_request_streamer(prefetch, num_requests, async_iterator):
requests_handled = []
results_handled = []
def request_handler_fn(request):
requests_handled.append(request)
async def task():
await asyncio.sleep(0.5)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(result):
results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn():
# with a sync generator, iteration
assert len(requests_handled) == num_requests
assert len(results_handled) <= num_requests
def _yield_data_request():
req = DataRequest()
req.header.request_id = random_identity()
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
async def _get_async_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
await asyncio.sleep(0.1)
args = Namespace()
args.prefetch = prefetch
streamer = RequestStreamer(
request_handler=request_handler_fn,
result_handler=result_handle_fn,
end_of_iter_handler=end_of_iter_fn,
prefetch=getattr(args, 'prefetch', 0),
)
it = (
_get_async_requests_iterator(num_requests)
if async_iterator
else _get_sync_requests_iterator(num_requests)
)
response = streamer.stream(it)
num_responses = 0
async for r in response:
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
|
import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
async def test_request_streamer(prefetch, num_requests, async_iterator):
requests_handled = []
results_handled = []
def request_handler_fn(request):
requests_handled.append(request)
async def task():
await asyncio.sleep(0.5)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(result):
results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn():
# with a sync generator, iteration
assert len(requests_handled) == num_requests
assert len(results_handled) <= num_requests
def _yield_data_request():
req = DataRequest()
req.header.request_id = random_identity()
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
async def _get_async_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
await asyncio.sleep(0.1)
args = Namespace()
args.prefetch = prefetch
streamer = RequestStreamer(
args=args,
request_handler=request_handler_fn,
result_handler=result_handle_fn,
end_of_iter_handler=end_of_iter_fn,
)
it = (
_get_async_requests_iterator(num_requests)
if async_iterator
else _get_sync_requests_iterator(num_requests)
)
response = streamer.stream(it)
num_responses = 0
async for r in response:
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.47"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.46"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
from typing import TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing import NdArray
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(self: T, samples: int, multiple_geometries: bool = False) -> NdArray:
"""
Load the data from the url into an NdArray containing point cloud information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDocument):
point_cloud_url: PointCloud3DvUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:return: np.ndarray representing the point cloud
"""
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(force='scene')
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh')
point_cloud = np.array(mesh.sample(samples))
return parse_obj_as(NdArray, point_cloud)
|
from typing import TYPE_CHECKING, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T', bound='PointCloud3DUrl')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(point_cloud_url=str(self))
def load(self: T, samples: int, multiple_geometries: bool = False) -> NdArray:
"""
Load the data from the url into an NdArray containing point cloud information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDocument):
point_cloud_url: PointCloud3DvUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:return: np.ndarray representing the point cloud
"""
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(force='scene')
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh')
point_cloud = np.array(mesh.sample(samples))
return parse_obj_as(NdArray, point_cloud)
|
"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms in the tightly-knit world of scientific Python
# packages (numpy, scipy, matplotlib).
#
# It aims to provide simple and efficient solutions to learning problems
# that are accessible to everybody and reusable in various contexts:
# machine-learning as a versatile tool for science and engineering.
#
# See https://scikit-learn.org for complete documentation.
import importlib as _importlib
import logging
import os
import random
from ._config import config_context, get_config, set_config
logger = logging.getLogger(__name__)
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y.0 # For first release after an increment in Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.Y.ZaN # Alpha release
# X.Y.ZbN # Beta release
# X.Y.ZrcN # Release Candidate
# X.Y.Z # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "1.8.dev0"
# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
# simultaneously. This can happen for instance when calling BLAS inside a
# prange. Setting the following environment variable allows multiple OpenMP
# libraries to be loaded. It should not degrade performances since we manually
# take care of potential over-subcription performance issues, in sections of
# the code where nested OpenMP loops can happen, by dynamically reconfiguring
# the inner OpenMP runtime to temporarily disable it while under the scope of
# the outer OpenMP parallel section.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
# `_distributor_init` allows distributors to run custom init code.
# For instance, for the Windows wheel, this is used to pre-load the
# vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
# sub-folder.
# It is necessary to do this prior to importing show_versions as the
# later is linked to the OpenMP runtime to make it possible to introspect
# it and importing it first would fail if the OpenMP dll cannot be found.
from . import ( # noqa: F401 E402
__check_build,
_distributor_init,
)
from .base import clone # noqa: E402
from .utils._show_versions import show_versions # noqa: E402
_submodules = [
"calibration",
"cluster",
"covariance",
"cross_decomposition",
"datasets",
"decomposition",
"dummy",
"ensemble",
"exceptions",
"experimental",
"externals",
"feature_extraction",
"feature_selection",
"frozen",
"gaussian_process",
"inspection",
"isotonic",
"kernel_approximation",
"kernel_ridge",
"linear_model",
"manifold",
"metrics",
"mixture",
"model_selection",
"multiclass",
"multioutput",
"naive_bayes",
"neighbors",
"neural_network",
"pipeline",
"preprocessing",
"random_projection",
"semi_supervised",
"svm",
"tree",
"discriminant_analysis",
"impute",
"compose",
]
__all__ = _submodules + [
# Non-modules:
"clone",
"get_config",
"set_config",
"config_context",
"show_versions",
]
def __dir__():
return __all__
def __getattr__(name):
if name in _submodules:
return _importlib.import_module(f"sklearn.{name}")
else:
try:
return globals()[name]
except KeyError:
raise AttributeError(f"Module 'sklearn' has no attribute '{name}'")
_BUILT_WITH_MESON = False
try:
import sklearn._built_with_meson # noqa: F401
_BUILT_WITH_MESON = True
except ModuleNotFoundError:
pass
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import numpy as np
# Check if a random seed exists in the environment, if not create one.
_random_seed = os.environ.get("SKLEARN_SEED", None)
if _random_seed is None:
_random_seed = np.random.uniform() * np.iinfo(np.int32).max
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
|
"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms in the tightly-knit world of scientific Python
# packages (numpy, scipy, matplotlib).
#
# It aims to provide simple and efficient solutions to learning problems
# that are accessible to everybody and reusable in various contexts:
# machine-learning as a versatile tool for science and engineering.
#
# See https://scikit-learn.org for complete documentation.
import importlib as _importlib
import logging
import os
import random
from ._config import config_context, get_config, set_config
logger = logging.getLogger(__name__)
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y.0 # For first release after an increment in Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.Y.ZaN # Alpha release
# X.Y.ZbN # Beta release
# X.Y.ZrcN # Release Candidate
# X.Y.Z # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "1.7.dev0"
# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
# simultaneously. This can happen for instance when calling BLAS inside a
# prange. Setting the following environment variable allows multiple OpenMP
# libraries to be loaded. It should not degrade performances since we manually
# take care of potential over-subcription performance issues, in sections of
# the code where nested OpenMP loops can happen, by dynamically reconfiguring
# the inner OpenMP runtime to temporarily disable it while under the scope of
# the outer OpenMP parallel section.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
# `_distributor_init` allows distributors to run custom init code.
# For instance, for the Windows wheel, this is used to pre-load the
# vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
# sub-folder.
# It is necessary to do this prior to importing show_versions as the
# later is linked to the OpenMP runtime to make it possible to introspect
# it and importing it first would fail if the OpenMP dll cannot be found.
from . import ( # noqa: F401 E402
__check_build,
_distributor_init,
)
from .base import clone # noqa: E402
from .utils._show_versions import show_versions # noqa: E402
_submodules = [
"calibration",
"cluster",
"covariance",
"cross_decomposition",
"datasets",
"decomposition",
"dummy",
"ensemble",
"exceptions",
"experimental",
"externals",
"feature_extraction",
"feature_selection",
"frozen",
"gaussian_process",
"inspection",
"isotonic",
"kernel_approximation",
"kernel_ridge",
"linear_model",
"manifold",
"metrics",
"mixture",
"model_selection",
"multiclass",
"multioutput",
"naive_bayes",
"neighbors",
"neural_network",
"pipeline",
"preprocessing",
"random_projection",
"semi_supervised",
"svm",
"tree",
"discriminant_analysis",
"impute",
"compose",
]
__all__ = _submodules + [
# Non-modules:
"clone",
"get_config",
"set_config",
"config_context",
"show_versions",
]
def __dir__():
return __all__
def __getattr__(name):
if name in _submodules:
return _importlib.import_module(f"sklearn.{name}")
else:
try:
return globals()[name]
except KeyError:
raise AttributeError(f"Module 'sklearn' has no attribute '{name}'")
_BUILT_WITH_MESON = False
try:
import sklearn._built_with_meson # noqa: F401
_BUILT_WITH_MESON = True
except ModuleNotFoundError:
pass
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import numpy as np
# Check if a random seed exists in the environment, if not create one.
_random_seed = os.environ.get("SKLEARN_SEED", None)
if _random_seed is None:
_random_seed = np.random.uniform() * np.iinfo(np.int32).max
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
|
from abc import ABC
class BaseStandardTests(ABC):
""":private:"""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""Test that no standard tests are overridden.
:private:
"""
# find path to standard test implementations
comparison_class = None
def explore_bases(cls: type) -> None:
nonlocal comparison_class
for base in cls.__bases__:
if base.__module__.startswith("langchain_tests."):
if comparison_class is None:
comparison_class = base
else:
msg = (
"Multiple standard test base classes found: "
f"{comparison_class}, {base}"
)
raise ValueError(msg)
else:
explore_bases(base)
explore_bases(self.__class__)
assert comparison_class is not None, "No standard test base class found."
print(f"Comparing {self.__class__} to {comparison_class}") # noqa: T201
running_tests = {method for method in dir(self) if method.startswith("test_")}
base_tests = {
method for method in dir(comparison_class) if method.startswith("test_")
}
deleted_tests = base_tests - running_tests
assert not deleted_tests, f"Standard tests deleted: {deleted_tests}"
overridden_tests = [
method
for method in base_tests
if getattr(self.__class__, method) is not getattr(comparison_class, method)
]
def is_xfail(method: str) -> bool:
m = getattr(self.__class__, method)
if not hasattr(m, "pytestmark"):
return False
marks = m.pytestmark
return any(
mark.name == "xfail" and mark.kwargs.get("reason") for mark in marks
)
overridden_not_xfail = [
method for method in overridden_tests if not is_xfail(method)
]
assert not overridden_not_xfail, (
"Standard tests overridden without "
f'@pytest.mark.xfail(reason="..."): {overridden_not_xfail}\n'
"Note: reason is required to explain why the standard test has an expected "
"failure."
)
|
from abc import ABC
class BaseStandardTests(ABC):
"""
:private:
"""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""
Test that no standard tests are overridden.
:private:
"""
# find path to standard test implementations
comparison_class = None
def explore_bases(cls: type) -> None:
nonlocal comparison_class
for base in cls.__bases__:
if base.__module__.startswith("langchain_tests."):
if comparison_class is None:
comparison_class = base
else:
raise ValueError(
"Multiple standard test base classes found: "
f"{comparison_class}, {base}"
)
else:
explore_bases(base)
explore_bases(self.__class__)
assert comparison_class is not None, "No standard test base class found."
print(f"Comparing {self.__class__} to {comparison_class}") # noqa: T201
running_tests = set(
[method for method in dir(self) if method.startswith("test_")]
)
base_tests = set(
[method for method in dir(comparison_class) if method.startswith("test_")]
)
deleted_tests = base_tests - running_tests
assert not deleted_tests, f"Standard tests deleted: {deleted_tests}"
overridden_tests = [
method
for method in base_tests
if getattr(self.__class__, method) is not getattr(comparison_class, method)
]
def is_xfail(method: str) -> bool:
m = getattr(self.__class__, method)
if not hasattr(m, "pytestmark"):
return False
marks = m.pytestmark
return any(
mark.name == "xfail" and mark.kwargs.get("reason") for mark in marks
)
overridden_not_xfail = [
method for method in overridden_tests if not is_xfail(method)
]
assert not overridden_not_xfail, (
"Standard tests overridden without "
f'@pytest.mark.xfail(reason="..."): {overridden_not_xfail}\n'
"Note: reason is required to explain why the standard test has an expected "
"failure."
)
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self.assertTrue(gradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
self.assertTrue(gradgradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.preemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.preemphasis, (waveform, coeff)))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.deemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.deemphasis, (waveform, coeff)))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
def test_filter_waveform(self):
waveform = torch.rand(3, 1, 2, 10, device=self.device, dtype=self.dtype, requires_grad=True)
filters = torch.rand(3, 2, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.filter_waveform, (waveform, filters))
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self.assertTrue(gradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
self.assertTrue(gradgradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.preemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.preemphasis, (waveform, coeff)))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype, requires_grad=True)
coeff = 0.9
self.assertTrue(gradcheck(F.deemphasis, (waveform, coeff)))
self.assertTrue(gradgradcheck(F.deemphasis, (waveform, coeff)))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.