input (string, lengths 33–5k) | output (string, lengths 32–5k)
---|---|
# TODO: Awaiting refactoring
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Set evaluation interval
evaluation = dict(interval=2)
# Set checkpoint interval
checkpoint_config = dict(interval=4)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='MMDetWandbHook',
init_kwargs={
'project': 'mmdetection',
'group': 'maskrcnn-r50-fpn-1x-coco'
},
interval=50,
log_checkpoint=True,
log_checkpoint_metadata=True,
num_eval_images=100)
])
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Set evaluation interval
evaluation = dict(interval=2)
# Set checkpoint interval
checkpoint_config = dict(interval=4)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='MMDetWandbHook',
init_kwargs={
'project': 'mmdetection',
'group': 'maskrcnn-r50-fpn-1x-coco'
},
interval=50,
log_checkpoint=True,
log_checkpoint_metadata=True,
num_eval_images=100)
])
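# A brief usage sketch (the config path below is illustrative, assuming the
# standard MMDetection 2.x layout): a config built on these _base_ files is
# typically launched with the stock training entry point, e.g.
#   python tools/train.py configs/wandb/mask_rcnn_r50_fpn_1x_coco_wandb.py
# MMDetWandbHook then logs metrics every `interval` iterations and, with
# log_checkpoint=True, uploads the checkpoints written by checkpoint_config.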
|
import pytest
from typing import List
from unittest.mock import patch, MagicMock
from llama_index.embeddings.ibm import WatsonxEmbeddings
class TestWatsonxEmbeddings:
TEST_URL = "https://us-south.ml.cloud.ibm.com"
TEST_APIKEY = "apikey"
TEST_PROJECT_ID = "project_id"
TEST_MODEL = "test_model"
def mock_embed_query(self) -> List[float]:
return [-0.053358648, -0.009175377, -0.025022397]
def mock_embed_texts(self) -> List[List[float]]:
return [
[-0.053358648, -0.009175377, -0.025022397],
[-0.053358648, -0.009175377, -0.025022397],
]
def test_initialization(self) -> None:
with pytest.raises(ValueError, match=r"^Did not find") as e_info:
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL, project_id=self.TEST_PROJECT_ID
)
# Cloud scenario
with pytest.raises(
ValueError, match=r"^Did not find 'apikey' or 'token',"
) as e_info:
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
project_id=self.TEST_PROJECT_ID,
)
# CPD scenario
with pytest.raises(ValueError, match=r"^Did not find instance_id") as e_info:
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
token="123",
url="cpd-instance",
project_id=self.TEST_PROJECT_ID,
)
@patch("llama_index.embeddings.ibm.base.Embeddings")
def test_get_query_embedding(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_query.return_value = self.mock_embed_query()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
assert embed.get_query_embedding(query="TEST") == self.mock_embed_query()
@patch("llama_index.embeddings.ibm.base.Embeddings")
def test_get_texts_embedding(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_documents.return_value = self.mock_embed_texts()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
assert (
embed.get_text_embedding_batch(texts=["TEST1", "TEST2"])
== self.mock_embed_texts()
)
@pytest.mark.asyncio
@patch("llama_index.embeddings.ibm.base.Embeddings")
async def test_get_query_embedding_async(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_query.return_value = self.mock_embed_query()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
response = await embed.aget_text_embedding("TEST1")
assert response == self.mock_embed_query()
|
import pytest
from typing import List
from unittest.mock import patch, MagicMock
from llama_index.embeddings.ibm import WatsonxEmbeddings
class TestWatsonxEmbeddings:
TEST_URL = "https://us-south.ml.cloud.ibm.com"
TEST_APIKEY = "apikey"
TEST_PROJECT_ID = "project_id"
TEST_MODEL = "test_model"
def mock_embed_query(self) -> List[float]:
return [-0.053358648, -0.009175377, -0.025022397]
def mock_embed_texts(self) -> List[List[float]]:
return [
[-0.053358648, -0.009175377, -0.025022397],
[-0.053358648, -0.009175377, -0.025022397],
]
def test_initialization(self) -> None:
with pytest.raises(ValueError, match=r"^Did not find") as e_info:
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL, project_id=self.TEST_PROJECT_ID
)
# Cloud scenario
with pytest.raises(
ValueError, match=r"^Did not find 'apikey' or 'token',"
) as e_info:
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
project_id=self.TEST_PROJECT_ID,
)
# CPD scenario
with pytest.raises(ValueError, match=r"^Did not find instance_id") as e_info:
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
token="123",
url="cpd-instance",
project_id=self.TEST_PROJECT_ID,
)
@patch("llama_index.embeddings.ibm.base.Embeddings")
def test_get_query_embedding(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_query.return_value = self.mock_embed_query()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
assert embed.get_query_embedding(query="TEST") == self.mock_embed_query()
@patch("llama_index.embeddings.ibm.base.Embeddings")
def test_get_texts_embedding(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_documents.return_value = self.mock_embed_texts()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
assert (
embed.get_text_embedding_batch(texts=["TEST1", "TEST2"])
== self.mock_embed_texts()
)
@pytest.mark.asyncio()
@patch("llama_index.embeddings.ibm.base.Embeddings")
async def test_get_query_embedding_async(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_query.return_value = self.mock_embed_query()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
response = await embed.aget_text_embedding("TEST1")
assert response == self.mock_embed_query()
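# Outside these tests, the class is constructed with real credentials; a minimal
# sketch (model id and credential values are illustrative only):
# embed = WatsonxEmbeddings(
#     model_id="ibm/slate-125m-english-rtrvr",
#     url="https://us-south.ml.cloud.ibm.com",
#     apikey="<your-apikey>",
#     project_id="<your-project-id>",
# )
# vector = embed.get_query_embedding("What is watsonx.ai?")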
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# We generate a dataset with two concentric circles. In addition, a label
# is associated with each sample of the dataset that is: 0 (belonging to
# the outer circle), 1 (belonging to the inner circle), and -1 (unknown).
# Here, all labels but two are tagged as unknown.
import numpy as np
from sklearn.datasets import make_circles
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = np.full(n_samples, -1.0)
labels[0] = outer
labels[-1] = inner
# %%
# Plot raw data
import matplotlib.pyplot as plt
plt.figure(figsize=(4, 4))
plt.scatter(
X[labels == outer, 0],
X[labels == outer, 1],
color="navy",
marker="s",
lw=0,
label="outer labeled",
s=10,
)
plt.scatter(
X[labels == inner, 0],
X[labels == inner, 1],
color="c",
marker="s",
lw=0,
label="inner labeled",
s=10,
)
plt.scatter(
X[labels == -1, 0],
X[labels == -1, 1],
color="darkorange",
marker=".",
label="unlabeled",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
_ = plt.title("Raw data (2 classes=outer and inner)")
# %%
#
# The aim of :class:`~sklearn.semi_supervised.LabelSpreading` is to associate
# a label to each sample for which the label is initially unknown.
from sklearn.semi_supervised import LabelSpreading
label_spread = LabelSpreading(kernel="knn", alpha=0.8)
label_spread.fit(X, labels)
# %%
# Now, we can check which labels have been associated with each sample
# when the label was unknown.
output_labels = label_spread.transduction_
output_label_array = np.asarray(output_labels)
outer_numbers = (output_label_array == outer).nonzero()[0]
inner_numbers = (output_label_array == inner).nonzero()[0]
plt.figure(figsize=(4, 4))
plt.scatter(
X[outer_numbers, 0],
X[outer_numbers, 1],
color="navy",
marker="s",
lw=0,
s=10,
label="outer learned",
)
plt.scatter(
X[inner_numbers, 0],
X[inner_numbers, 1],
color="c",
marker="s",
lw=0,
s=10,
label="inner learned",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
plt.title("Labels learned with Label Spreading (KNN)")
plt.show()
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# We generate a dataset with two concentric circles. In addition, a label
# is associated with each sample of the dataset that is: 0 (belonging to
# the outer circle), 1 (belonging to the inner circle), and -1 (unknown).
# Here, all labels but two are tagged as unknown.
import numpy as np
from sklearn.datasets import make_circles
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = np.full(n_samples, -1.0)
labels[0] = outer
labels[-1] = inner
# %%
# Plot raw data
import matplotlib.pyplot as plt
plt.figure(figsize=(4, 4))
plt.scatter(
X[labels == outer, 0],
X[labels == outer, 1],
color="navy",
marker="s",
lw=0,
label="outer labeled",
s=10,
)
plt.scatter(
X[labels == inner, 0],
X[labels == inner, 1],
color="c",
marker="s",
lw=0,
label="inner labeled",
s=10,
)
plt.scatter(
X[labels == -1, 0],
X[labels == -1, 1],
color="darkorange",
marker=".",
label="unlabeled",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
_ = plt.title("Raw data (2 classes=outer and inner)")
# %%
#
# The aim of :class:`~sklearn.semi_supervised.LabelSpreading` is to associate
# a label to each sample for which the label is initially unknown.
from sklearn.semi_supervised import LabelSpreading
label_spread = LabelSpreading(kernel="knn", alpha=0.8)
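# alpha is the clamping factor: with alpha=0.8 each sample adopts 80% of its label
# information from its neighbors at every iteration and keeps 20% of its initial label.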
label_spread.fit(X, labels)
# %%
# Now, we can check which labels have been associated with each sample
# when the label was unknown.
output_labels = label_spread.transduction_
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.figure(figsize=(4, 4))
plt.scatter(
X[outer_numbers, 0],
X[outer_numbers, 1],
color="navy",
marker="s",
lw=0,
s=10,
label="outer learned",
)
plt.scatter(
X[inner_numbers, 0],
X[inner_numbers, 1],
color="c",
marker="s",
lw=0,
s=10,
label="inner learned",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
plt.title("Labels learned with Label Spreading (KNN)")
plt.show()
|
"""
The :mod:`sklearn._loss` module includes loss function classes suitable for
fitting classification and regression tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from .loss import (
AbsoluteError,
HalfBinomialLoss,
HalfGammaLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
HalfSquaredError,
HalfTweedieLoss,
HalfTweedieLossIdentity,
HuberLoss,
PinballLoss,
)
__all__ = [
"AbsoluteError",
"HalfBinomialLoss",
"HalfGammaLoss",
"HalfMultinomialLoss",
"HalfPoissonLoss",
"HalfSquaredError",
"HalfTweedieLoss",
"HalfTweedieLossIdentity",
"HuberLoss",
"PinballLoss",
]
|
"""
The :mod:`sklearn._loss` module includes loss function classes suitable for
fitting classification and regression tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from .loss import (
AbsoluteError,
HalfBinomialLoss,
HalfGammaLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
HalfSquaredError,
HalfTweedieLoss,
HalfTweedieLossIdentity,
HuberLoss,
PinballLoss,
)
__all__ = [
"HalfSquaredError",
"AbsoluteError",
"PinballLoss",
"HuberLoss",
"HalfPoissonLoss",
"HalfGammaLoss",
"HalfTweedieLoss",
"HalfTweedieLossIdentity",
"HalfBinomialLoss",
"HalfMultinomialLoss",
]
|
import os
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredPowerPointLoader(UnstructuredFileLoader):
"""Load `Microsoft PowerPoint` files using `Unstructured`.
Works with both .ppt and .pptx files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredPowerPointLoader
loader = UnstructuredPowerPointLoader(
"example.pptx", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-pptx
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the PowerPoint file to load.
mode: The mode to use when loading the file. Can be one of "single"
or "elements". Default is "single".
**unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.file_utils.filetype import FileType, detect_filetype
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_ppt = detect_filetype(self.file_path) == FileType.PPT
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_ppt = extension == ".ppt"
if is_ppt:
validate_unstructured_version("0.4.11")
if is_ppt:
from unstructured.partition.ppt import partition_ppt
return partition_ppt(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.pptx import partition_pptx
return partition_pptx(filename=self.file_path, **self.unstructured_kwargs)
|
import os
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredPowerPointLoader(UnstructuredFileLoader):
"""Load `Microsoft PowerPoint` files using `Unstructured`.
Works with both .ppt and .pptx files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredPowerPointLoader
loader = UnstructuredPowerPointLoader(
"example.pptx", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-pptx
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the PowerPoint file to load.
mode: The mode to use when loading the file. Can be one of "single"
or "elements". Default is "single".
**unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.file_utils.filetype import FileType, detect_filetype
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_ppt = detect_filetype(self.file_path) == FileType.PPT # type: ignore[arg-type]
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_ppt = extension == ".ppt"
if is_ppt:
validate_unstructured_version("0.4.11")
if is_ppt:
from unstructured.partition.ppt import partition_ppt
return partition_ppt(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
else:
from unstructured.partition.pptx import partition_pptx
return partition_pptx(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
from workflows.decorators import StepConfig # noqa
from workflows.decorators import step as upstream_step # noqa
from typing import Callable, Any
def step(*args: Any, **kwargs: Any) -> Callable:
# Remove old, unused parameter
kwargs.pop("pass_context", None)
return upstream_step(*args, **kwargs)
|
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from .errors import WorkflowValidationError
from .utils import (
ServiceDefinition,
inspect_signature,
is_free_function,
validate_step_signature,
)
from .resource import ResourceDefinition
if TYPE_CHECKING: # pragma: no cover
from .workflow import Workflow
from .retry_policy import RetryPolicy
class StepConfig(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
accepted_events: List[Any]
event_name: str
return_types: List[Any]
context_parameter: Optional[str]
num_workers: int
requested_services: List[ServiceDefinition]
retry_policy: Optional[RetryPolicy]
resources: List[ResourceDefinition]
def step(
*args: Any,
workflow: Optional[Type["Workflow"]] = None,
pass_context: bool = False,
num_workers: int = 4,
retry_policy: Optional[RetryPolicy] = None,
) -> Callable:
"""
Decorator used to mark methods and functions as workflow steps.
Decorators are evaluated at import time, but we need to wait for
starting the communication channels until runtime. For this reason,
we temporarily store the list of events that will be consumed by this
step in the function object itself.
Args:
workflow: Workflow class to which the decorated step will be added. Only needed when using the
decorator on free functions instead of class methods.
num_workers: The number of workers that will process events for the decorated step. The default
value works most of the time.
retry_policy: The policy used to retry a step that encountered an error while running.
"""
def decorator(func: Callable) -> Callable:
if not isinstance(num_workers, int) or num_workers <= 0:
raise WorkflowValidationError(
"num_workers must be an integer greater than 0"
)
# This will raise providing a message with the specific validation failure
spec = inspect_signature(func)
validate_step_signature(spec)
event_name, accepted_events = next(iter(spec.accepted_events.items()))
# store the configuration in the function object
func.__step_config = StepConfig( # type: ignore[attr-defined]
accepted_events=accepted_events,
event_name=event_name,
return_types=spec.return_types,
context_parameter=spec.context_parameter,
num_workers=num_workers,
requested_services=spec.requested_services or [],
retry_policy=retry_policy,
resources=spec.resources,
)
# If this is a free function, call add_step() explicitly.
if is_free_function(func.__qualname__):
if workflow is None:
msg = f"To decorate {func.__name__} please pass a workflow class to the @step decorator."
raise WorkflowValidationError(msg)
workflow.add_step(func)
return func
if len(args):
# The decorator was used without parentheses, like `@step`
func = args[0]
decorator(func)
return func
return decorator
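# A minimal usage sketch (Workflow, StartEvent and StopEvent are assumed to be the
# event/workflow classes from the surrounding package; illustrative only):
# class MyWorkflow(Workflow):
#     @step
#     async def run_step(self, ev: StartEvent) -> StopEvent:
#         return StopEvent(result="done")
#
# For a free function, the target workflow must be passed explicitly:
# @step(workflow=MyWorkflow)
# async def external_step(ev: StartEvent) -> StopEvent:
#     return StopEvent(result="done")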
|
from typing import Any
from langchain_core.memory import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""Memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> list[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
|
from typing import Any, Dict, List
from langchain_core.memory import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""Memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
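# A minimal usage sketch (ConversationBufferMemory is assumed to be importable from
# langchain.memory; illustrative only):
# from langchain.memory import ConversationBufferMemory
# shared = ConversationBufferMemory()
# readonly = ReadOnlySharedMemory(memory=shared)
# readonly.load_memory_variables({})  # reads pass through to `shared`
# readonly.save_context({"input": "hi"}, {"output": "ignored"})  # silently a no-op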
|
import abc
import importlib
import pathlib
from typing import Any, Collection, Dict, Iterator, List, Optional, Sequence, Union
from torchdata.datapipes.iter import IterDataPipe
from torchvision.datasets.utils import verify_str_arg
from ._resource import OnlineResource
class Dataset(IterDataPipe[Dict[str, Any]], abc.ABC):
@staticmethod
def _verify_str_arg(
value: str,
arg: Optional[str] = None,
valid_values: Optional[Collection[str]] = None,
*,
custom_msg: Optional[str] = None,
) -> str:
return verify_str_arg(value, arg, valid_values, custom_msg=custom_msg)
def __init__(
self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False, dependencies: Collection[str] = ()
) -> None:
for dependency in dependencies:
try:
importlib.import_module(dependency)
except ModuleNotFoundError:
raise ModuleNotFoundError(
f"{type(self).__name__}() depends on the third-party package '{dependency}'. "
f"Please install it, for example with `pip install {dependency}`."
) from None
self._root = pathlib.Path(root).expanduser().resolve()
resources = [
resource.load(self._root, skip_integrity_check=skip_integrity_check) for resource in self._resources()
]
self._dp = self._datapipe(resources)
def __iter__(self) -> Iterator[Dict[str, Any]]:
yield from self._dp
@abc.abstractmethod
def _resources(self) -> List[OnlineResource]:
pass
@abc.abstractmethod
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
pass
@abc.abstractmethod
def __len__(self) -> int:
pass
def _generate_categories(self) -> Sequence[Union[str, Sequence[str]]]:
raise NotImplementedError
|
import abc
import importlib
import pathlib
from typing import Any, Collection, Dict, Iterator, List, Optional, Sequence, Union
from torch.utils.data import IterDataPipe
from torchvision.datasets.utils import verify_str_arg
from ._resource import OnlineResource
class Dataset(IterDataPipe[Dict[str, Any]], abc.ABC):
@staticmethod
def _verify_str_arg(
value: str,
arg: Optional[str] = None,
valid_values: Optional[Collection[str]] = None,
*,
custom_msg: Optional[str] = None,
) -> str:
return verify_str_arg(value, arg, valid_values, custom_msg=custom_msg)
def __init__(
self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False, dependencies: Collection[str] = ()
) -> None:
for dependency in dependencies:
try:
importlib.import_module(dependency)
except ModuleNotFoundError:
raise ModuleNotFoundError(
f"{type(self).__name__}() depends on the third-party package '{dependency}'. "
f"Please install it, for example with `pip install {dependency}`."
) from None
self._root = pathlib.Path(root).expanduser().resolve()
resources = [
resource.load(self._root, skip_integrity_check=skip_integrity_check) for resource in self._resources()
]
self._dp = self._datapipe(resources)
def __iter__(self) -> Iterator[Dict[str, Any]]:
yield from self._dp
@abc.abstractmethod
def _resources(self) -> List[OnlineResource]:
pass
@abc.abstractmethod
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
pass
@abc.abstractmethod
def __len__(self) -> int:
pass
def _generate_categories(self) -> Sequence[Union[str, Sequence[str]]]:
raise NotImplementedError
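# A minimal subclass sketch (class, resource and field names are illustrative only):
# class MyDataset(Dataset):
#     def _resources(self) -> List[OnlineResource]:
#         return [my_archive_resource]  # e.g. an HttpResource for the data archive
#
#     def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
#         return resource_dps[0].map(lambda sample: {"data": sample})
#
#     def __len__(self) -> int:
#         return 10_000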
|
import copy
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token = "deprecated"
ignore_url_params: bool = False
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
def __post_init__(self):
if self.use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={self.use_auth_token}' instead.",
FutureWarning,
)
self.token = self.use_auth_token
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
import copy
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token = "deprecated"
ignore_url_params: bool = False
storage_options: Dict[str, Dict[str, Any]] = field(default_factory=dict)
download_desc: Optional[str] = None
def __post_init__(self):
if self.use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={self.use_auth_token}' instead.",
FutureWarning,
)
self.token = self.use_auth_token
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
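# A minimal usage sketch (paths and values are illustrative only):
# base_config = DownloadConfig(cache_dir="~/.cache/my_datasets", max_retries=3)
# per_file_config = base_config.copy()  # deep copy, so later tweaks don't leak back
# per_file_config.download_desc = "Downloading annotations"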
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# Attempt to fix the fork error on macOS (it may have no effect; the variable must be exported manually before starting Jina)
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(
f'multiprocessing start method is set to `fork`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `fork`: {e!r}'
)
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.17.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful when running matplotlib/seaborn with
many parallel plot generators against the Ubuntu default ulimit -n 1024 or the OS X El Capitan default of 256;
the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# Attempt to fix the fork error on macOS (it may have no effect; the variable must be exported manually before starting Jina)
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(
f'multiprocessing start method is set to `fork`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `fork`: {e!r}'
)
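# For example, the start method can be forced from the environment before launching,
# as exercised by the CLI tests shown further below:
#   JINA_MP_START_METHOD=spawn jina -v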
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.16.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful when running matplotlib/seaborn with
many parallel plot generators against the Ubuntu default ulimit -n 1024 or the OS X El Capitan default of 256;
the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Construct denser atom positions (14 dimensions instead of 37)."""
restype_atom14_to_atom37_list = []
restype_atom37_to_atom14_list = []
restype_atom14_mask_list = []
for rt in rc.restypes:
atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
restype_atom37_to_atom14_list.append(
[(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
)
restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
# Add dummy mapping for restype 'UNK'
restype_atom14_to_atom37_list.append([0] * 14)
restype_atom37_to_atom14_list.append([0] * 37)
restype_atom14_mask_list.append([0.0] * 14)
restype_atom14_to_atom37 = torch.tensor(
restype_atom14_to_atom37_list,
dtype=torch.int32,
device=protein["aatype"].device,
)
restype_atom37_to_atom14 = torch.tensor(
restype_atom37_to_atom14_list,
dtype=torch.int32,
device=protein["aatype"].device,
)
restype_atom14_mask = torch.tensor(
restype_atom14_mask_list,
dtype=torch.float32,
device=protein["aatype"].device,
)
protein_aatype = protein["aatype"].to(torch.long)
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
residx_atom14_mask = restype_atom14_mask[protein_aatype]
protein["atom14_atom_exists"] = residx_atom14_mask
protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
# create the gather indices for mapping back
residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
# create the corresponding mask
restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
for restype, restype_letter in enumerate(rc.restypes):
restype_name = rc.restype_1to3[restype_letter]
atom_names = rc.residue_atoms[restype_name]
for atom_name in atom_names:
atom_type = rc.atom_order[atom_name]
restype_atom37_mask[restype, atom_type] = 1
residx_atom37_mask = restype_atom37_mask[protein_aatype]
protein["atom37_atom_exists"] = residx_atom37_mask
return protein
def make_atom14_masks_np(batch: dict[str, torch.Tensor]) -> dict[str, np.ndarray]:
batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
return out
|
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Construct denser atom positions (14 dimensions instead of 37)."""
restype_atom14_to_atom37_list = []
restype_atom37_to_atom14_list = []
restype_atom14_mask_list = []
for rt in rc.restypes:
atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
restype_atom37_to_atom14_list.append(
[(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
)
restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
# Add dummy mapping for restype 'UNK'
restype_atom14_to_atom37_list.append([0] * 14)
restype_atom37_to_atom14_list.append([0] * 37)
restype_atom14_mask_list.append([0.0] * 14)
restype_atom14_to_atom37 = torch.tensor(
restype_atom14_to_atom37_list,
dtype=torch.int32,
device=protein["aatype"].device,
)
restype_atom37_to_atom14 = torch.tensor(
restype_atom37_to_atom14_list,
dtype=torch.int32,
device=protein["aatype"].device,
)
restype_atom14_mask = torch.tensor(
restype_atom14_mask_list,
dtype=torch.float32,
device=protein["aatype"].device,
)
protein_aatype = protein["aatype"].to(torch.long)
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
residx_atom14_mask = restype_atom14_mask[protein_aatype]
protein["atom14_atom_exists"] = residx_atom14_mask
protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
# create the gather indices for mapping back
residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
# create the corresponding mask
restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
for restype, restype_letter in enumerate(rc.restypes):
restype_name = rc.restype_1to3[restype_letter]
atom_names = rc.residue_atoms[restype_name]
for atom_name in atom_names:
atom_type = rc.atom_order[atom_name]
restype_atom37_mask[restype, atom_type] = 1
residx_atom37_mask = restype_atom37_mask[protein_aatype]
protein["atom37_atom_exists"] = residx_atom37_mask
return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
return out
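# A minimal illustrative call (shapes only; aatype value 0 maps to rc.restypes[0]):
# protein = {"aatype": torch.zeros(8, dtype=torch.long)}
# out = make_atom14_masks(protein)
# out["residx_atom14_to_atom37"].shape  # torch.Size([8, 14])
# out["atom37_atom_exists"].shape       # torch.Size([8, 37])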
|
import io
from typing import Tuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.bytes.base_bytes import BaseBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio import AudioNdArray
from docarray.utils._internal.misc import import_library
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(BaseBytes):
"""
Bytes that store audio data and that can be loaded into an Audio tensor
"""
def load(self) -> Tuple[AudioNdArray, int]:
"""
Load the Audio from the [`AudioBytes`][docarray.typing.AudioBytes] into an
[`AudioNdArray`][docarray.typing.AudioNdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioNdArray, AudioUrl
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[AudioNdArray]
bytes_: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes_ = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes_.load()
# Note this is equivalent to doing
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, AudioNdArray)
```
---
:return: tuple of an [`AudioNdArray`][docarray.typing.AudioNdArray] representing the
audio bytes content, and an integer representing the frame rate.
"""
pydub = import_library('pydub', raise_error=True) # noqa: F841
from pydub import AudioSegment
segment = AudioSegment.from_file(io.BytesIO(self))
# Convert to float32 using NumPy
samples = np.array(segment.get_array_of_samples())
# Normalise float32 array so that values are between -1.0 and +1.0
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return parse_obj_as(AudioNdArray, samples_norm), segment.frame_rate
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio import AudioNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
Bytes that store audio data and that can be loaded into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[AudioNdArray, int]:
"""
Load the Audio from the [`AudioBytes`][docarray.typing.AudioBytes] into an
[`AudioNdArray`][docarray.typing.AudioNdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioNdArray, AudioUrl
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[AudioNdArray]
bytes_: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes_ = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes_.load()
# Note this is equivalent to doing
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, AudioNdArray)
```
---
:return: tuple of an [`AudioNdArray`][docarray.typing.AudioNdArray] representing the
audio bytes content, and an integer representing the frame rate.
"""
pydub = import_library('pydub', raise_error=True) # noqa: F841
from pydub import AudioSegment
segment = AudioSegment.from_file(io.BytesIO(self))
# Convert to float32 using NumPy
samples = np.array(segment.get_array_of_samples())
# Normalise float32 array so that values are between -1.0 and +1.0
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
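# For example, 16-bit PCM audio has sample_width == 2, so samples are divided by
# 2 ** 15 == 32768, mapping the int16 range [-32768, 32767] onto roughly [-1.0, 1.0).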
return parse_obj_as(AudioNdArray, samples_norm), segment.frame_rate
|
from __future__ import annotations
from .CrossEncoder import CrossEncoder
__all__ = ["CrossEncoder"]
|
from .CrossEncoder import CrossEncoder
__all__ = ["CrossEncoder"]
|
__version__ = '0.37.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.37.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
from tests.helper import _generate_pod_args
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
@pytest.mark.parametrize(
'uses', ['jinaai://jina-ai/DummyHubExecutor']
)
def test_cli_hub(uses):
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', uses])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = _generate_pod_args()
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
@pytest.mark.parametrize(
'cmd',
[
['jina', 'ping', 'flow', '127.0.0.1:8080'],
['jina', 'help', 'port'],
['jina', 'hub'],
],
)
def test_logo_silence(cmd):
from jina import __resources_path__
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
s = subprocess.run(
cmd,
stdout=subprocess.PIPE,
)
assert logo_str not in s.stdout.decode()
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
from tests.helper import _generate_pod_args
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
def test_cli_hub():
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', 'jinahub://DummyHubExecutor'])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = _generate_pod_args()
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
@pytest.mark.parametrize(
'cmd',
[
['jina', 'ping', 'flow', '127.0.0.1:8080'],
['jina', 'help', 'port'],
['jina', 'hub'],
],
)
def test_logo_silence(cmd):
from jina import __resources_path__
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
s = subprocess.run(
cmd,
stdout=subprocess.PIPE,
)
assert logo_str not in s.stdout.decode()
|
"""Internal representation of a structured query language."""
from __future__ import annotations
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional, Union
from pydantic import BaseModel
if TYPE_CHECKING:
from collections.abc import Sequence
class Visitor(ABC):
"""Defines interface for IR translation using a visitor pattern."""
allowed_comparators: Optional[Sequence[Comparator]] = None
"""Allowed comparators for the visitor."""
allowed_operators: Optional[Sequence[Operator]] = None
"""Allowed operators for the visitor."""
def _validate_func(self, func: Union[Operator, Comparator]) -> None:
if (
isinstance(func, Operator)
and self.allowed_operators is not None
and func not in self.allowed_operators
):
msg = (
f"Received disallowed operator {func}. Allowed "
f"comparators are {self.allowed_operators}"
)
raise ValueError(msg)
if (
isinstance(func, Comparator)
and self.allowed_comparators is not None
and func not in self.allowed_comparators
):
msg = (
f"Received disallowed comparator {func}. Allowed "
f"comparators are {self.allowed_comparators}"
)
raise ValueError(msg)
@abstractmethod
def visit_operation(self, operation: Operation) -> Any:
"""Translate an Operation.
Args:
operation: Operation to translate.
"""
@abstractmethod
def visit_comparison(self, comparison: Comparison) -> Any:
"""Translate a Comparison.
Args:
comparison: Comparison to translate.
"""
@abstractmethod
def visit_structured_query(self, structured_query: StructuredQuery) -> Any:
"""Translate a StructuredQuery.
Args:
structured_query: StructuredQuery to translate.
"""
def _to_snake_case(name: str) -> str:
"""Convert a name into snake_case."""
snake_case = ""
for i, char in enumerate(name):
if char.isupper() and i != 0:
snake_case += "_" + char.lower()
else:
snake_case += char.lower()
return snake_case
class Expr(BaseModel):
"""Base class for all expressions."""
def accept(self, visitor: Visitor) -> Any:
"""Accept a visitor.
Args:
visitor: visitor to accept.
Returns:
result of visiting.
"""
return getattr(visitor, f"visit_{_to_snake_case(self.__class__.__name__)}")(
self
)
class Operator(str, Enum):
"""Enumerator of the operations."""
AND = "and"
OR = "or"
NOT = "not"
class Comparator(str, Enum):
"""Enumerator of the comparison operators."""
EQ = "eq"
NE = "ne"
GT = "gt"
GTE = "gte"
LT = "lt"
LTE = "lte"
CONTAIN = "contain"
LIKE = "like"
IN = "in"
NIN = "nin"
class FilterDirective(Expr, ABC):
"""Filtering expression."""
class Comparison(FilterDirective):
"""Comparison to a value.
Parameters:
comparator: The comparator to use.
attribute: The attribute to compare.
value: The value to compare to.
"""
comparator: Comparator
attribute: str
value: Any
def __init__(
self, comparator: Comparator, attribute: str, value: Any, **kwargs: Any
) -> None:
# super exists from BaseModel
super().__init__( # type: ignore[call-arg]
comparator=comparator, attribute=attribute, value=value, **kwargs
)
class Operation(FilterDirective):
"""Logical operation over other directives.
Parameters:
operator: The operator to use.
arguments: The arguments to the operator.
"""
operator: Operator
arguments: list[FilterDirective]
def __init__(
self, operator: Operator, arguments: list[FilterDirective], **kwargs: Any
) -> None:
# super exists from BaseModel
super().__init__( # type: ignore[call-arg]
operator=operator, arguments=arguments, **kwargs
)
class StructuredQuery(Expr):
"""Structured query."""
query: str
"""Query string."""
filter: Optional[FilterDirective]
"""Filtering expression."""
limit: Optional[int]
"""Limit on the number of results."""
def __init__(
self,
query: str,
filter: Optional[FilterDirective],
limit: Optional[int] = None,
**kwargs: Any,
) -> None:
# super exists from BaseModel
super().__init__( # type: ignore[call-arg]
query=query, filter=filter, limit=limit, **kwargs
)
|
"""Internal representation of a structured query language."""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from enum import Enum
from typing import Any, Optional, Union
from pydantic import BaseModel
class Visitor(ABC):
"""Defines interface for IR translation using a visitor pattern."""
allowed_comparators: Optional[Sequence[Comparator]] = None
"""Allowed comparators for the visitor."""
allowed_operators: Optional[Sequence[Operator]] = None
"""Allowed operators for the visitor."""
def _validate_func(self, func: Union[Operator, Comparator]) -> None:
if (
isinstance(func, Operator)
and self.allowed_operators is not None
and func not in self.allowed_operators
):
msg = (
f"Received disallowed operator {func}. Allowed "
f"comparators are {self.allowed_operators}"
)
raise ValueError(msg)
if (
isinstance(func, Comparator)
and self.allowed_comparators is not None
and func not in self.allowed_comparators
):
msg = (
f"Received disallowed comparator {func}. Allowed "
f"comparators are {self.allowed_comparators}"
)
raise ValueError(msg)
@abstractmethod
def visit_operation(self, operation: Operation) -> Any:
"""Translate an Operation.
Args:
operation: Operation to translate.
"""
@abstractmethod
def visit_comparison(self, comparison: Comparison) -> Any:
"""Translate a Comparison.
Args:
comparison: Comparison to translate.
"""
@abstractmethod
def visit_structured_query(self, structured_query: StructuredQuery) -> Any:
"""Translate a StructuredQuery.
Args:
structured_query: StructuredQuery to translate.
"""
def _to_snake_case(name: str) -> str:
"""Convert a name into snake_case."""
snake_case = ""
for i, char in enumerate(name):
if char.isupper() and i != 0:
snake_case += "_" + char.lower()
else:
snake_case += char.lower()
return snake_case
class Expr(BaseModel):
"""Base class for all expressions."""
def accept(self, visitor: Visitor) -> Any:
"""Accept a visitor.
Args:
visitor: visitor to accept.
Returns:
result of visiting.
"""
return getattr(visitor, f"visit_{_to_snake_case(self.__class__.__name__)}")(
self
)
class Operator(str, Enum):
"""Enumerator of the operations."""
AND = "and"
OR = "or"
NOT = "not"
class Comparator(str, Enum):
"""Enumerator of the comparison operators."""
EQ = "eq"
NE = "ne"
GT = "gt"
GTE = "gte"
LT = "lt"
LTE = "lte"
CONTAIN = "contain"
LIKE = "like"
IN = "in"
NIN = "nin"
class FilterDirective(Expr, ABC):
"""Filtering expression."""
class Comparison(FilterDirective):
"""Comparison to a value.
Parameters:
comparator: The comparator to use.
attribute: The attribute to compare.
value: The value to compare to.
"""
comparator: Comparator
attribute: str
value: Any
def __init__(
self, comparator: Comparator, attribute: str, value: Any, **kwargs: Any
) -> None:
# super exists from BaseModel
super().__init__( # type: ignore[call-arg]
comparator=comparator, attribute=attribute, value=value, **kwargs
)
class Operation(FilterDirective):
"""Logical operation over other directives.
Parameters:
operator: The operator to use.
arguments: The arguments to the operator.
"""
operator: Operator
arguments: list[FilterDirective]
def __init__(
self, operator: Operator, arguments: list[FilterDirective], **kwargs: Any
) -> None:
# super exists from BaseModel
super().__init__( # type: ignore[call-arg]
operator=operator, arguments=arguments, **kwargs
)
class StructuredQuery(Expr):
"""Structured query."""
query: str
"""Query string."""
filter: Optional[FilterDirective]
"""Filtering expression."""
limit: Optional[int]
"""Limit on the number of results."""
def __init__(
self,
query: str,
filter: Optional[FilterDirective],
limit: Optional[int] = None,
**kwargs: Any,
) -> None:
# super exists from BaseModel
super().__init__( # type: ignore[call-arg]
query=query, filter=filter, limit=limit, **kwargs
)
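# --- Hedged usage sketch (not part of the original module): how the IR classes
# above compose. The attribute names, values and query text are illustrative
# assumptions only.
def _example_structured_query() -> StructuredQuery:
    """Build a toy StructuredQuery: (year >= 2000) AND (genre == "sci-fi")."""
    filter_expr = Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.GTE, attribute="year", value=2000),
            Comparison(comparator=Comparator.EQ, attribute="genre", value="sci-fi"),
        ],
    )
    return StructuredQuery(query="space exploration", filter=filter_expr, limit=5)
# A concrete Visitor subclass would translate the result through the snake_case
# dispatch in Expr.accept, e.g. _example_structured_query().accept(my_visitor)
# ends up calling my_visitor.visit_structured_query(...).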
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_tar, _load_waveform
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
SAMPLE_RATE = 8000
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""*QUESST14* :cite:`Mir2015QUESST2014EQ` dataset.
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
if subset not in ["docs", "dev", "eval"]:
raise ValueError("`subset` must be one of ['docs', 'dev', 'eval']")
if language is not None and language not in _LANGUAGES:
raise ValueError(f"`language` must be None or one of {str(_LANGUAGES)}")
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
_extract_tar(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def get_metadata(self, n: int) -> Tuple[str, int, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
str:
Path to audio
int:
Sample rate
str:
File name
"""
audio_path = self.data[n]
relpath = os.path.relpath(audio_path, self._path)
return relpath, SAMPLE_RATE, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
File name
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
language: str,
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform, extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
SAMPLE_RATE = 8000
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""*QUESST14* :cite:`Mir2015QUESST2014EQ` dataset.
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
if subset not in ["docs", "dev", "eval"]:
raise ValueError("`subset` must be one of ['docs', 'dev', 'eval']")
if language is not None and language not in _LANGUAGES:
raise ValueError(f"`language` must be None or one of {str(_LANGUAGES)}")
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def get_metadata(self, n: int) -> Tuple[str, int, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
str:
Path to audio
int:
Sample rate
str:
File name
"""
audio_path = self.data[n]
relpath = os.path.relpath(audio_path, self._path)
return relpath, SAMPLE_RATE, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
File name
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
language: str,
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
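# --- Hedged usage sketch (assumption, not part of the dataset code above):
# instantiate the dataset defined in this file and read one sample. The root
# directory is illustrative; with download=True the archive is fetched on
# first use, which may take a while.
if __name__ == "__main__":
    quesst14 = QUESST14("./data", subset="docs", language="nnenglish", download=True)
    waveform, sample_rate, file_name = quesst14[0]
    print(file_name, sample_rate, tuple(waveform.shape))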
|
from collections.abc import Generator
from langchain_huggingface.llms import HuggingFacePipeline
def test_huggingface_pipeline_streaming() -> None:
"""Test streaming tokens from huggingface_pipeline."""
llm = HuggingFacePipeline.from_model_id(
model_id="openai-community/gpt2",
task="text-generation",
pipeline_kwargs={"max_new_tokens": 10},
)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
stream_results_string = chunk
assert len(stream_results_string.strip()) > 0
|
from collections.abc import Generator
from langchain_huggingface.llms import HuggingFacePipeline
def test_huggingface_pipeline_streaming() -> None:
"""Test streaming tokens from huggingface_pipeline."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}
)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
stream_results_string = chunk
assert len(stream_results_string.strip()) > 1
|
# Copyright (c) OpenMMLab. All rights reserved.
import json
import os.path as osp
from typing import List, Optional
from mmengine.fileio import get_local_path
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class ODVGDataset(BaseDetDataset):
"""object detection and visual grounding dataset."""
def __init__(self,
*args,
data_root: str = '',
label_map_file: Optional[str] = None,
need_text: bool = True,
**kwargs) -> None:
self.dataset_mode = 'VG'
self.need_text = need_text
if label_map_file:
label_map_file = osp.join(data_root, label_map_file)
with open(label_map_file, 'r') as file:
self.label_map = json.load(file)
self.dataset_mode = 'OD'
super().__init__(*args, data_root=data_root, **kwargs)
assert self.return_classes is True
def load_data_list(self) -> List[dict]:
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
with open(local_path, 'r') as f:
data_list = [json.loads(line) for line in f]
out_data_list = []
for data in data_list:
data_info = {}
img_path = osp.join(self.data_prefix['img'], data['filename'])
data_info['img_path'] = img_path
data_info['height'] = data['height']
data_info['width'] = data['width']
if self.dataset_mode == 'OD':
if self.need_text:
data_info['text'] = self.label_map
anno = data.get('detection', {})
instances = [obj for obj in anno.get('instances', [])]
bboxes = [obj['bbox'] for obj in instances]
bbox_labels = [str(obj['label']) for obj in instances]
instances = []
for bbox, label in zip(bboxes, bbox_labels):
instance = {}
x1, y1, x2, y2 = bbox
inter_w = max(0, min(x2, data['width']) - max(x1, 0))
inter_h = max(0, min(y2, data['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if (x2 - x1) < 1 or (y2 - y1) < 1:
continue
instance['ignore_flag'] = 0
instance['bbox'] = bbox
instance['bbox_label'] = int(label)
instances.append(instance)
data_info['instances'] = instances
data_info['dataset_mode'] = self.dataset_mode
out_data_list.append(data_info)
else:
anno = data['grounding']
data_info['text'] = anno['caption']
regions = anno['regions']
instances = []
phrases = {}
for i, region in enumerate(regions):
bbox = region['bbox']
phrase = region['phrase']
tokens_positive = region['tokens_positive']
if not isinstance(bbox[0], list):
bbox = [bbox]
for box in bbox:
instance = {}
x1, y1, x2, y2 = box
inter_w = max(0, min(x2, data['width']) - max(x1, 0))
inter_h = max(0, min(y2, data['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if (x2 - x1) < 1 or (y2 - y1) < 1:
continue
instance['ignore_flag'] = 0
instance['bbox'] = box
instance['bbox_label'] = i
phrases[i] = {
'phrase': phrase,
'tokens_positive': tokens_positive
}
instances.append(instance)
data_info['instances'] = instances
data_info['phrases'] = phrases
data_info['dataset_mode'] = self.dataset_mode
out_data_list.append(data_info)
del data_list
return out_data_list
|
# Copyright (c) OpenMMLab. All rights reserved.
import json
import os.path as osp
from typing import List, Optional
from mmengine.fileio import get_local_path
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class ODVGDataset(BaseDetDataset):
"""object detection and visual grounding dataset."""
def __init__(self,
*args,
data_root: str = '',
label_map_file: Optional[str] = None,
need_text: bool = True,
**kwargs) -> None:
self.dataset_mode = 'VG'
self.need_text = need_text
if label_map_file:
label_map_file = osp.join(data_root, label_map_file)
with open(label_map_file, 'r') as file:
self.label_map = json.load(file)
self.dataset_mode = 'OD'
super().__init__(*args, data_root=data_root, **kwargs)
assert self.return_classes is True
def load_data_list(self) -> List[dict]:
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
with open(local_path, 'r') as f:
data_list = [json.loads(line) for line in f]
out_data_list = []
for data in data_list:
data_info = {}
img_path = osp.join(self.data_prefix['img'], data['filename'])
data_info['img_path'] = img_path
data_info['height'] = data['height']
data_info['width'] = data['width']
if self.dataset_mode == 'OD':
if self.need_text:
data_info['text'] = self.label_map
anno = data['detection']
instances = [obj for obj in anno['instances']]
bboxes = [obj['bbox'] for obj in instances]
bbox_labels = [str(obj['label']) for obj in instances]
instances = []
for bbox, label in zip(bboxes, bbox_labels):
instance = {}
x1, y1, x2, y2 = bbox
inter_w = max(0, min(x2, data['width']) - max(x1, 0))
inter_h = max(0, min(y2, data['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if (x2 - x1) < 1 or (y2 - y1) < 1:
continue
instance['ignore_flag'] = 0
instance['bbox'] = bbox
instance['bbox_label'] = int(label)
instances.append(instance)
data_info['instances'] = instances
data_info['dataset_mode'] = self.dataset_mode
out_data_list.append(data_info)
else:
anno = data['grounding']
data_info['text'] = anno['caption']
regions = anno['regions']
instances = []
phrases = {}
for i, region in enumerate(regions):
bbox = region['bbox']
phrase = region['phrase']
tokens_positive = region['tokens_positive']
if not isinstance(bbox[0], list):
bbox = [bbox]
for box in bbox:
instance = {}
x1, y1, x2, y2 = box
inter_w = max(0, min(x2, data['width']) - max(x1, 0))
inter_h = max(0, min(y2, data['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if (x2 - x1) < 1 or (y2 - y1) < 1:
continue
instance['ignore_flag'] = 0
instance['bbox'] = box
instance['bbox_label'] = i
                        # `phrase` is only kept for visualization; `tokens_positive` is what matters
phrases[i] = {
'phrase': phrase,
'tokens_positive': tokens_positive
}
instances.append(instance)
data_info['instances'] = instances
data_info['phrases'] = phrases
data_info['dataset_mode'] = self.dataset_mode
out_data_list.append(data_info)
del data_list
return out_data_list
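# --- Hedged sketch of the JSON-line records load_data_list expects; the field
# values below are illustrative assumptions. Each line of the annotation file
# describes one image, either OD-style ('detection') or VG-style ('grounding').
_EXAMPLE_OD_RECORD = {
    'filename': '000001.jpg', 'height': 480, 'width': 640,
    'detection': {'instances': [{'bbox': [10, 20, 110, 220], 'label': 3}]}
}
_EXAMPLE_VG_RECORD = {
    'filename': '000002.jpg', 'height': 480, 'width': 640,
    'grounding': {
        'caption': 'a dog next to a bicycle',
        'regions': [{'bbox': [5, 5, 100, 200], 'phrase': 'a dog',
                     'tokens_positive': [[0, 5]]}]
    }
}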
|
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
        # When leaving the scope, the device should default back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
def get_device(t):
            # Note: after updating to JAX 0.4.33, this can directly use the `t.device` attribute.
return list(t.devices())[0]
platform = jax.default_backend()
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
        # When leaving the scope, the device should default back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
if not torch.cuda.device_count():
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
        # When leaving the scope, the device should default back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_meta_device(self):
import torch
with torch.device("meta"):
x = torch.ones(5)
t = backend.convert_to_tensor(x)
self.assertEqual(t.device, torch.device("cpu"))
|
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
        # When leaving the scope, the device should default back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
def get_device(t):
            # Note: after updating to JAX 0.4.33, this can directly use the `t.device` attribute.
return list(t.devices())[0]
platform = jax.default_backend()
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
        # When leaving the scope, the device should default back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
if not torch.cuda.device_count():
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
        # When leaving the scope, the device should default back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GitLabToolkit": "langchain_community.agent_toolkits.gitlab.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GitLabToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GitLabToolkit": "langchain_community.agent_toolkits.gitlab.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GitLabToolkit",
]
|
"""
This script contains an example of how to perform semantic search with PyTorch. It performs exact nearest neighbor search.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
As the embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned across 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
Google Colab example: https://colab.research.google.com/drive/12cn5Oo0v3HfQQ8Tv6-ukgxXSmT3zl35A?usp=sharing
"""
import csv
import os
import pickle
import time
from sentence_transformers import SentenceTransformer, util
model_name = "quora-distilbert-multilingual"
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print(f"Corpus loaded with {len(corpus_sentences)} sentences / embeddings")
# Move embeddings to the target device of the model
corpus_embeddings = corpus_embeddings.to(model.device)
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings)
end_time = time.time()
hits = hits[0] # Get the hits for the first query
print("Input question:", inp_question)
print(f"Results (after {end_time - start_time:.3f} seconds):")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This script contains an example of how to perform semantic search with PyTorch. It performs exact nearest neighbor search.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
As the embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned across 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
Google Colab example: https://colab.research.google.com/drive/12cn5Oo0v3HfQQ8Tv6-ukgxXSmT3zl35A?usp=sharing
"""
import csv
import os
import pickle
import time
from sentence_transformers import SentenceTransformer, util
model_name = "quora-distilbert-multilingual"
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
# Move embeddings to the target device of the model
corpus_embeddings = corpus_embeddings.to(model.device)
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings)
end_time = time.time()
hits = hits[0] # Get the hits for the first query
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
import asyncio
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
from jina.serve.runtimes.gateway.websocket.gateway import WebSocketGateway
class WebSocketGatewayRuntime(GatewayRuntime):
"""Runtime for Websocket interface."""
async def async_setup(self):
"""
        The async method to set up the runtime.
        Sets up the uvicorn server.
"""
self.gateway = WebSocketGateway(
name=self.name,
port=self.args.port,
ssl_keyfile=self.args.ssl_keyfile,
ssl_certfile=self.args.ssl_certfile,
uvicorn_kwargs=self.args.uvicorn_kwargs,
logger=self.logger,
)
self.gateway.set_streamer(
args=self.args,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
runtime_name=self.args.name,
)
await self.gateway.setup_server()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self.gateway.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self.gateway.teardown()
async def async_cancel(self):
"""Stop the server."""
await self.gateway.stop_server()
async def async_run_forever(self):
"""Running method of ther server."""
await self.gateway.run_server()
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
class WebSocketGatewayRuntime(GatewayRuntime):
"""Runtime for Websocket interface."""
async def async_setup(self):
"""
        The async method to set up the runtime.
        Sets up the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
args=self.args,
logger=self.logger,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of ther server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005))
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example of how to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import dump, get
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
default='val',
choices=['train', 'val', 'test'],
help='Collect image metas from which dataset')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
        help='The output image metas file name. It is saved in the '
        'same directory as the `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
        help='Number of processes used to get image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
shape = img.shape
meta = dict(filename=filename, ori_shape=shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
    assert args.out.endswith('pkl'), 'The output file name must have a pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
        suffix = ann_file.split('.')[-1]
        raise NotImplementedError('File name must have a csv or txt suffix, '
                                  f'but got {suffix}')
    print(f'Successfully loaded annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
dump(image_metas, save_path, protocol=4)
    print(f'Image meta file saved to: {save_path}')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example of how to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import FileClient, dump
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
default='val',
choices=['train', 'val', 'test'],
help='Collect image metas from which dataset')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
        help='The output image metas file name. It is saved in the '
        'same directory as the `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
        help='Number of processes used to get image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
file_client = FileClient(backend='disk')
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
shape = img.shape
meta = dict(filename=filename, ori_shape=shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
    assert args.out.endswith('pkl'), 'The output file name must have a pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
        suffix = ann_file.split('.')[-1]
        raise NotImplementedError('File name must have a csv or txt suffix, '
                                  f'but got {suffix}')
    print(f'Successfully loaded annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
dump(image_metas, save_path, protocol=4)
    print(f'Image meta file saved to: {save_path}')
if __name__ == '__main__':
main()
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and score candidates
def score(x, y, fwd_mean, bwd_mean, margin):
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin):
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin)
return scores
def kNN(x, y, k, use_ann_search=False, ann_num_clusters=32768, ann_num_cluster_probe=3):
start_time = time.time()
if use_ann_search:
print("Perform approx. kNN search")
n_cluster = min(ann_num_clusters, int(y.shape[0] / 1000))
quantizer = faiss.IndexFlatIP(y.shape[1])
index = faiss.IndexIVFFlat(quantizer, y.shape[1], n_cluster, faiss.METRIC_INNER_PRODUCT)
index.nprobe = ann_num_cluster_probe
index.train(y)
index.add(y)
sim, ind = index.search(x, k)
else:
print("Perform exact search")
idx = faiss.IndexFlatIP(y.shape[1])
idx.add(y)
sim, ind = idx.search(x, k)
print(f"Done: {time.time() - start_time:.2f} sec")
return sim, ind
def file_open(filepath):
    # Helper that opens files based on their file extension
if filepath.endswith(".gz"):
return gzip.open(filepath, "rt", encoding="utf8")
elif filepath.endswith("xz"):
return lzma.open(filepath, "rt", encoding="utf8")
else:
return open(filepath, encoding="utf8")
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and score candidates
def score(x, y, fwd_mean, bwd_mean, margin):
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin):
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin)
return scores
def kNN(x, y, k, use_ann_search=False, ann_num_clusters=32768, ann_num_cluster_probe=3):
start_time = time.time()
if use_ann_search:
print("Perform approx. kNN search")
n_cluster = min(ann_num_clusters, int(y.shape[0] / 1000))
quantizer = faiss.IndexFlatIP(y.shape[1])
index = faiss.IndexIVFFlat(quantizer, y.shape[1], n_cluster, faiss.METRIC_INNER_PRODUCT)
index.nprobe = ann_num_cluster_probe
index.train(y)
index.add(y)
sim, ind = index.search(x, k)
else:
print("Perform exact search")
idx = faiss.IndexFlatIP(y.shape[1])
idx.add(y)
sim, ind = idx.search(x, k)
print("Done: {:.2f} sec".format(time.time() - start_time))
return sim, ind
def file_open(filepath):
    # Helper that opens files based on their file extension
if filepath.endswith(".gz"):
return gzip.open(filepath, "rt", encoding="utf8")
elif filepath.endswith("xz"):
return lzma.open(filepath, "rt", encoding="utf8")
else:
return open(filepath, "r", encoding="utf8")
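# --- Hedged sketch (assumption, not part of the original utilities): how kNN
# and score_candidates are typically combined for margin-based candidate
# scoring. The random embeddings and the ratio margin below are illustrative.
if __name__ == "__main__":
    x = np.random.rand(8, 128).astype("float32")   # source-side embeddings
    y = np.random.rand(64, 128).astype("float32")  # target-side embeddings
    k = 4
    sim_fwd, ind_fwd = kNN(x, y, k)  # forward neighbors of each x row in y
    sim_bwd, _ = kNN(y, x, k)        # backward neighbors of each y row in x
    fwd_mean = sim_fwd.mean(axis=1)  # one mean similarity per source row
    bwd_mean = sim_bwd.mean(axis=1)  # one mean similarity per target row
    scores = score_candidates(x, y, ind_fwd, fwd_mean, bwd_mean,
                              margin=lambda a, b: a / b)
    print(scores.shape)  # (8, 4): one margin score per candidate pair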
|
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
checkpoint=pretrained)))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
checkpoint=pretrained)))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
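# Illustrative expansion of the two dict-comprehension updates above (assuming depths = [2, 2, 18, 2]):
# they add one key per transformer-block norm and one per patch-merging (downsample) norm, all with
# lr_mult=0.1 and decay_mult=0.0.
depths_example = [2, 2, 18, 2]
block_norm_keys = [
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm'
    for stage_id, num_blocks in enumerate(depths_example)
    for block_id in range(num_blocks)
]
downsample_norm_keys = [
    f'backbone.stages.{stage_id}.downsample.norm'
    for stage_id in range(len(depths_example) - 1)
]
print(len(block_norm_keys), len(downsample_norm_keys))  # 24 block norms, 3 downsample norms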
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain:
- a [`VideoUrl`][docarray.typing.url.VideoUrl] (`VideoDoc.url`)
- an [`AudioDoc`][docarray.documents.AudioDoc] (`VideoDoc.audio`)
- a [`VideoTensor`](../../../api_references/typing/tensor/video) (`VideoDoc.tensor`)
- an [`AnyTensor`](../../../api_references/typing/tensor/tensor) representing the indices of the video's key frames (`VideoDoc.key_frame_indices`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`VideoDoc.embedding`)
- a [`VideoBytes`][docarray.typing.bytes.VideoBytes] object (`VideoDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import VideoDoc, AudioDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
tensor, audio_tensor, key_frame_indices = vid.url.load()
vid.tensor = tensor
vid.audio = AudioDoc(tensor=audio_tensor)
vid.key_frame_indices = key_frame_indices
# model = MyEmbeddingModel()
# vid.embedding = model(vid.tensor)
```
You can extend this Document:
```python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video.name = TextDoc(text='my first video')
video.tensor = video.url.load().video
# model = MyEmbeddingModel()
# video.embedding = model(video.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
mmdoc.video.tensor = mmdoc.video.bytes_.load().video
```
"""
url: Optional[VideoUrl] = Field(
description='URL to a (potentially remote) video file that needs to be loaded',
example='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
default=None,
)
audio: Optional[AudioDoc] = Field(
description='Audio document associated with the video',
default=None,
)
tensor: Optional[VideoTensor] = Field(
        description='Tensor object representing the video, which can be specified as one of `VideoNdArray`, `VideoTorchTensor`, or `VideoTensorFlowTensor`',
default=None,
)
key_frame_indices: Optional[AnyTensor] = Field(
description='List of all the key frames in the video',
example=[0, 1, 2, 3, 4],
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the video',
example=[1, 0, 1],
default=None,
)
bytes_: Optional[VideoBytes] = Field(
description='Bytes representation of the video',
default=None,
)
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain:
- a [`VideoUrl`][docarray.typing.url.VideoUrl] (`VideoDoc.url`)
- an [`AudioDoc`][docarray.documents.AudioDoc] (`VideoDoc.audio`)
- a [`VideoTensor`](../../../api_references/typing/tensor/video) (`VideoDoc.tensor`)
- an [`AnyTensor`](../../../api_references/typing/tensor/tensor) representing the indices of the video's key frames (`VideoDoc.key_frame_indices`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`VideoDoc.embedding`)
- a [`VideoBytes`][docarray.typing.bytes.VideoBytes] object (`VideoDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
# model = MyEmbeddingModel()
# vid.embedding = model(vid.tensor)
```
You can extend this Document:
```python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video.name = TextDoc(text='my first video')
video.tensor = video.url.load().video
# model = MyEmbeddingModel()
# video.embedding = model(video.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
mmdoc.video.tensor = mmdoc.video.bytes_.load().video
```
"""
url: Optional[VideoUrl] = Field(
description='URL to a (potentially remote) video file that needs to be loaded',
example='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
default=None,
)
audio: Optional[AudioDoc] = Field(
description='Audio document associated with the video',
default=None,
)
tensor: Optional[VideoTensor] = Field(
        description='Tensor object representing the video, which can be specified as one of `VideoNdArray`, `VideoTorchTensor`, or `VideoTensorFlowTensor`',
default=None,
)
key_frame_indices: Optional[AnyTensor] = Field(
description='List of all the key frames in the video',
example=[0, 1, 2, 3, 4],
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the video',
example=[1, 0, 1],
default=None,
)
bytes_: Optional[VideoBytes] = Field(
description='Bytes representation of the video',
default=None,
)
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
metainfo = {
'CLASSES': ('person', ),
'PALETTE': [
(220, 20, 60),
]
}
train_dataloader = dict(dataset=dict(metainfo=metainfo))
val_dataloader = dict(dataset=dict(metainfo=metainfo))
test_dataloader = dict(dataset=dict(metainfo=metainfo))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
|
"""Standard LangChain interface tests."""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {
"model": "accounts/fireworks/models/llama-v3p1-70b-instruct",
"temperature": 0,
}
@pytest.mark.xfail(reason="Not yet implemented.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@property
def supports_json_mode(self) -> bool:
return True
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {
"model": "accounts/fireworks/models/llama-v3p1-70b-instruct",
"temperature": 0,
}
@pytest.mark.xfail(reason="Not yet implemented.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@property
def supports_json_mode(self) -> bool:
return True
|
"""Test tool spec."""
from typing import List, Tuple, Union
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.workflow import Context
class FooSchema(BaseModel):
arg1: str
arg2: int
class BarSchema(BaseModel):
arg1: bool
class AbcSchema(BaseModel):
arg1: str
class TestToolSpec(BaseToolSpec):
spec_functions: List[Union[str, Tuple[str, str]]] = [
"foo",
"bar",
"abc",
"abc_with_ctx",
]
def foo(self, arg1: str, arg2: int) -> str:
"""Foo."""
return f"foo {arg1} {arg2}"
def bar(self, arg1: bool) -> str:
"""Bar."""
return f"bar {arg1}"
async def afoo(self, arg1: str, arg2: int) -> str:
"""Afoo."""
return self.foo(arg1=arg1, arg2=arg2)
async def abar(self, arg1: bool) -> str:
"""Abar."""
return self.bar(arg1=arg1)
def abc(self, arg1: str) -> str:
# NOTE: no docstring
return f"bar {arg1}"
def abc_with_ctx(self, arg1: str, ctx: Context) -> str:
return f"bar {arg1}"
def unused_function(self, arg1: str) -> str:
return f"unused {arg1}"
def test_tool_spec() -> None:
"""Test tool spec."""
tool_spec = TestToolSpec()
# first is foo, second is bar
tools = tool_spec.to_tool_list()
assert len(tools) == 4
assert tools[0].metadata.name == "foo"
assert tools[0].metadata.description == "foo(arg1: str, arg2: int) -> str\nFoo."
assert tools[0].fn("hello", 1) == "foo hello 1"
assert tools[0].ctx_param_name is None
assert not tools[0].requires_context
assert tools[1].metadata.name == "bar"
assert tools[1].metadata.description == "bar(arg1: bool) -> str\nBar."
assert str(tools[1](True)) == "bar True"
assert tools[1].ctx_param_name is None
assert not tools[1].requires_context
assert tools[2].metadata.name == "abc"
assert tools[2].metadata.description == "abc(arg1: str) -> str\n"
assert (
tools[2].metadata.fn_schema.model_json_schema()["properties"]
== AbcSchema.model_json_schema()["properties"]
)
assert tools[2].ctx_param_name is None
assert not tools[2].requires_context
assert tools[3].metadata.name == "abc_with_ctx"
assert tools[3].metadata.description == "abc_with_ctx(arg1: str) -> str\n"
assert (
tools[3].metadata.fn_schema.model_json_schema()["properties"]
== AbcSchema.model_json_schema()["properties"]
)
assert tools[3].ctx_param_name == "ctx"
assert tools[3].requires_context
# test metadata mapping
tools = tool_spec.to_tool_list(
func_to_metadata_mapping={
"foo": ToolMetadata(
"foo_description", name="foo_name", fn_schema=FooSchema
),
}
)
assert len(tools) == 4
assert tools[0].metadata.name == "foo_name"
assert tools[0].metadata.description == "foo_description"
assert tools[0].metadata.fn_schema is not None
fn_schema = tools[0].metadata.fn_schema.model_json_schema()
print(fn_schema)
assert fn_schema["properties"]["arg1"]["type"] == "string"
assert fn_schema["properties"]["arg2"]["type"] == "integer"
assert tools[1].metadata.name == "bar"
assert tools[1].metadata.description == "bar(arg1: bool) -> str\nBar."
assert tools[1].metadata.fn_schema is not None
fn_schema = tools[1].metadata.fn_schema.model_json_schema()
assert fn_schema["properties"]["arg1"]["type"] == "boolean"
@pytest.mark.asyncio
async def test_tool_spec_async() -> None:
"""Test async_fn of tool spec."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list()
assert len(tools) == 4
assert await tools[0].async_fn("hello", 1) == "foo hello 1"
assert str(await tools[1].acall(True)) == "bar True"
def test_async_patching() -> None:
# test sync patching of async function
tool_spec = TestToolSpec()
tool_spec.spec_functions = ["afoo"]
tools = tool_spec.to_tool_list()
assert len(tools) == 1
assert tools[0].fn("hello", 1) == "foo hello 1"
def test_tool_spec_subset() -> None:
"""Test tool spec subset."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list(spec_functions=["abc"])
assert len(tools) == 1
assert tools[0].metadata.name == "abc"
assert tools[0].metadata.description == "abc(arg1: str) -> str\n"
assert (
tools[0].metadata.fn_schema.model_json_schema()["properties"]
== AbcSchema.model_json_schema()["properties"]
)
|
"""Test tool spec."""
from typing import List, Tuple, Union
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.workflow import Context
class FooSchema(BaseModel):
arg1: str
arg2: int
class BarSchema(BaseModel):
arg1: bool
class AbcSchema(BaseModel):
arg1: str
class TestToolSpec(BaseToolSpec):
spec_functions: List[Union[str, Tuple[str, str]]] = ["foo", "bar", "abc", "abc_with_ctx"]
def foo(self, arg1: str, arg2: int) -> str:
"""Foo."""
return f"foo {arg1} {arg2}"
def bar(self, arg1: bool) -> str:
"""Bar."""
return f"bar {arg1}"
async def afoo(self, arg1: str, arg2: int) -> str:
"""Afoo."""
return self.foo(arg1=arg1, arg2=arg2)
async def abar(self, arg1: bool) -> str:
"""Abar."""
return self.bar(arg1=arg1)
def abc(self, arg1: str) -> str:
# NOTE: no docstring
return f"bar {arg1}"
def abc_with_ctx(self, arg1: str, ctx: Context) -> str:
return f"bar {arg1}"
def unused_function(self, arg1: str) -> str:
return f"unused {arg1}"
def test_tool_spec() -> None:
"""Test tool spec."""
tool_spec = TestToolSpec()
# first is foo, second is bar
tools = tool_spec.to_tool_list()
assert len(tools) == 4
assert tools[0].metadata.name == "foo"
assert tools[0].metadata.description == "foo(arg1: str, arg2: int) -> str\nFoo."
assert tools[0].fn("hello", 1) == "foo hello 1"
assert tools[0].ctx_param_name is None
assert not tools[0].requires_context
assert tools[1].metadata.name == "bar"
assert tools[1].metadata.description == "bar(arg1: bool) -> str\nBar."
assert str(tools[1](True)) == "bar True"
assert tools[1].ctx_param_name is None
assert not tools[1].requires_context
assert tools[2].metadata.name == "abc"
assert tools[2].metadata.description == "abc(arg1: str) -> str\n"
assert tools[2].metadata.fn_schema.model_json_schema()["properties"] == AbcSchema.model_json_schema()["properties"]
assert tools[2].ctx_param_name is None
assert not tools[2].requires_context
assert tools[3].metadata.name == "abc_with_ctx"
assert tools[3].metadata.description == "abc_with_ctx(arg1: str) -> str\n"
assert tools[3].metadata.fn_schema.model_json_schema()["properties"] == AbcSchema.model_json_schema()["properties"]
assert tools[3].ctx_param_name == "ctx"
assert tools[3].requires_context
# test metadata mapping
tools = tool_spec.to_tool_list(
func_to_metadata_mapping={
"foo": ToolMetadata(
"foo_description", name="foo_name", fn_schema=FooSchema
),
}
)
assert len(tools) == 4
assert tools[0].metadata.name == "foo_name"
assert tools[0].metadata.description == "foo_description"
assert tools[0].metadata.fn_schema is not None
fn_schema = tools[0].metadata.fn_schema.model_json_schema()
print(fn_schema)
assert fn_schema["properties"]["arg1"]["type"] == "string"
assert fn_schema["properties"]["arg2"]["type"] == "integer"
assert tools[1].metadata.name == "bar"
assert tools[1].metadata.description == "bar(arg1: bool) -> str\nBar."
assert tools[1].metadata.fn_schema is not None
fn_schema = tools[1].metadata.fn_schema.model_json_schema()
assert fn_schema["properties"]["arg1"]["type"] == "boolean"
@pytest.mark.asyncio
async def test_tool_spec_async() -> None:
"""Test async_fn of tool spec."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list()
assert len(tools) == 4
assert await tools[0].async_fn("hello", 1) == "foo hello 1"
assert str(await tools[1].acall(True)) == "bar True"
def test_async_patching() -> None:
# test sync patching of async function
tool_spec = TestToolSpec()
tool_spec.spec_functions = ["afoo"]
tools = tool_spec.to_tool_list()
assert len(tools) == 1
assert tools[0].fn("hello", 1) == "foo hello 1"
def test_tool_spec_subset() -> None:
"""Test tool spec subset."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list(spec_functions=["abc"])
assert len(tools) == 1
assert tools[0].metadata.name == "abc"
assert tools[0].metadata.description == "abc(arg1: str) -> str\n"
assert tools[0].metadata.fn_schema.model_json_schema()["properties"] == AbcSchema.model_json_schema()["properties"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Union[dict, tuple, list]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict, Sequence]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (dict or tuple or list, optional): Data from dataloader.
outputs (dict or sequence, optional): Outputs from model.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.registry import HOOKS
from mmengine.structures import BaseDataElement
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
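# Illustrative usage sketch (values are assumptions): in an MMEngine config file, the hook is
# typically enabled through `custom_hooks`, e.g. to release cached GPU memory after every epoch.
custom_hooks = [
    dict(type='EmptyCacheHook', before_epoch=False, after_epoch=True, after_iter=False)
]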
|
from typing import Any, Iterable, Protocol, Sequence, runtime_checkable
import uuid
from llama_index.core.schema import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from docling_core.transforms.chunker import BaseChunker, HierarchicalChunker
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from llama_index.core.schema import (
BaseNode,
NodeRelationship,
RelatedNodeType,
TextNode,
)
from llama_index.core.utils import get_tqdm_iterable
class DoclingNodeParser(NodeParser):
"""
Docling format node parser.
Splits the JSON format of `DoclingReader` into nodes corresponding
to respective document elements from Docling's data model
(paragraphs, headings, tables etc.).
Args:
chunker (BaseChunker, optional): The chunker to use. Defaults to `HierarchicalChunker()`.
        id_func (NodeIDGenCallable, optional): The node ID generation function to use. Defaults to `_uuid4_node_id_gen`.
"""
@runtime_checkable
class NodeIDGenCallable(Protocol):
def __call__(self, i: int, node: BaseNode) -> str: ...
@staticmethod
def _uuid4_node_id_gen(i: int, node: BaseNode) -> str:
return str(uuid.uuid4())
chunker: BaseChunker = HierarchicalChunker()
id_func: NodeIDGenCallable = _uuid4_node_id_gen
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> list[BaseNode]:
nodes_with_progress: Iterable[BaseNode] = get_tqdm_iterable(
items=nodes, show_progress=show_progress, desc="Parsing nodes"
)
all_nodes: list[BaseNode] = []
for input_node in nodes_with_progress:
li_doc = LIDocument.model_validate(input_node)
dl_doc: DLDocument = DLDocument.model_validate_json(li_doc.get_content())
chunk_iter = self.chunker.chunk(dl_doc=dl_doc)
for i, chunk in enumerate(chunk_iter):
rels: dict[NodeRelationship, RelatedNodeType] = {
NodeRelationship.SOURCE: li_doc.as_related_node_info(),
}
metadata = chunk.meta.export_json_dict()
excl_embed_keys = [
k for k in chunk.meta.excluded_embed if k in metadata
]
excl_llm_keys = [k for k in chunk.meta.excluded_llm if k in metadata]
excl_embed_keys.extend(
[
k
for k in li_doc.excluded_embed_metadata_keys
if k not in excl_embed_keys
]
)
excl_llm_keys.extend(
[
k
for k in li_doc.excluded_llm_metadata_keys
if k not in excl_llm_keys
]
)
node = TextNode(
id_=self.id_func(i=i, node=li_doc),
text=chunk.text,
excluded_embed_metadata_keys=excl_embed_keys,
excluded_llm_metadata_keys=excl_llm_keys,
relationships=rels,
)
node.metadata = metadata
all_nodes.append(node)
return all_nodes
|
from typing import Any, Iterable, Protocol, Sequence, runtime_checkable
import uuid
from llama_index.core.schema import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from docling_core.transforms.chunker import BaseChunker, HierarchicalChunker
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from llama_index.core.schema import (
BaseNode,
NodeRelationship,
RelatedNodeType,
TextNode,
)
from llama_index.core.utils import get_tqdm_iterable
class DoclingNodeParser(NodeParser):
"""
Docling format node parser.
Splits the JSON format of `DoclingReader` into nodes corresponding
to respective document elements from Docling's data model
(paragraphs, headings, tables etc.).
Args:
chunker (BaseChunker, optional): The chunker to use. Defaults to `HierarchicalChunker()`.
        id_func (NodeIDGenCallable, optional): The node ID generation function to use. Defaults to `_uuid4_node_id_gen`.
"""
@runtime_checkable
class NodeIDGenCallable(Protocol):
def __call__(self, i: int, node: BaseNode) -> str:
...
@staticmethod
def _uuid4_node_id_gen(i: int, node: BaseNode) -> str:
return str(uuid.uuid4())
chunker: BaseChunker = HierarchicalChunker()
id_func: NodeIDGenCallable = _uuid4_node_id_gen
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> list[BaseNode]:
nodes_with_progress: Iterable[BaseNode] = get_tqdm_iterable(
items=nodes, show_progress=show_progress, desc="Parsing nodes"
)
all_nodes: list[BaseNode] = []
for input_node in nodes_with_progress:
li_doc = LIDocument.model_validate(input_node)
dl_doc: DLDocument = DLDocument.model_validate_json(li_doc.get_content())
chunk_iter = self.chunker.chunk(dl_doc=dl_doc)
for i, chunk in enumerate(chunk_iter):
rels: dict[NodeRelationship, RelatedNodeType] = {
NodeRelationship.SOURCE: li_doc.as_related_node_info(),
}
metadata = chunk.meta.export_json_dict()
excl_embed_keys = [
k for k in chunk.meta.excluded_embed if k in metadata
]
excl_llm_keys = [k for k in chunk.meta.excluded_llm if k in metadata]
node = TextNode(
id_=self.id_func(i=i, node=li_doc),
text=chunk.text,
excluded_embed_metadata_keys=excl_embed_keys,
excluded_llm_metadata_keys=excl_llm_keys,
relationships=rels,
)
node.metadata = metadata
all_nodes.append(node)
return all_nodes
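# Illustrative end-to-end sketch: pairing this parser with a reader that emits JSON-serialized
# DoclingDocuments. The `DoclingReader` import path, its `export_type` option, and the sample URL
# are assumptions for demonstration and are not guaranteed by this module.
from llama_index.readers.docling import DoclingReader

reader = DoclingReader(export_type=DoclingReader.ExportType.JSON)
docs = reader.load_data(file_path="https://arxiv.org/pdf/2408.09869")
node_parser = DoclingNodeParser()  # defaults to HierarchicalChunker and UUID node IDs
nodes = node_parser.get_nodes_from_documents(documents=docs)
print(nodes[0].text[:200])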
|
import multiprocessing
import time
import grpc
import pytest
import requests
from jina import __jina_env__, __version__
from jina.proto import jina_pb2, jina_pb2_grpc
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod_args
from .test_runtimes import _create_gateway_runtime, _create_head_runtime
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_worker(port):
# create a single worker runtime
p = multiprocessing.Process(target=_create_worker_runtime, args=(port,))
p.start()
time.sleep(0.1)
return p
def _create_gateway(port, graph, pod_addr, protocol):
    # create a single gateway runtime
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph, pod_addr, port, protocol),
)
p.start()
time.sleep(0.1)
return p
def _create_head(port, connection_list_dict, polling='ANY'):
p = multiprocessing.Process(
target=_create_head_runtime, args=(port, connection_list_dict, 'head', polling)
)
p.start()
time.sleep(0.1)
return p
@pytest.mark.parametrize('runtime', ['head', 'worker', 'gateway'])
def test_jina_info_grpc_based_runtimes(runtime, port_generator):
port = port_generator()
connection_list_dict = {}
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
if runtime == 'head':
p = _create_head(port, connection_list_dict)
elif runtime == 'gateway':
p = _create_gateway(port, graph_description, pod_addresses, 'grpc')
else:
p = _create_worker(port)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
channel = grpc.insecure_channel(f'localhost:{port}')
stub = jina_pb2_grpc.JinaInfoRPCStub(channel)
res = stub._status(
jina_pb2.google_dot_protobuf_dot_empty__pb2.Empty(),
)
assert res.jina['jina'] == __version__
for env_var in __jina_env__:
assert env_var in res.envs
except Exception:
assert False
finally:
p.terminate()
p.join()
@pytest.mark.parametrize('protocol', ['http', 'websocket'])
def test_jina_info_gateway_http(protocol, port_generator):
port = port_generator()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
p = _create_gateway(port, graph_description, pod_addresses, protocol)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
x = requests.get(f'http://localhost:{port}/status')
resp = x.json()
assert 'jina' in resp
assert 'envs' in resp
assert resp['jina']['jina'] == __version__
for env_var in __jina_env__:
assert env_var in resp['envs']
except Exception:
assert False
finally:
p.terminate()
p.join()
|
import multiprocessing
import time
import grpc
import pytest
import requests
from jina import __jina_env__, __version__
from jina.parsers import set_pod_parser
from jina.proto import jina_pb2, jina_pb2_grpc
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from .test_runtimes import _create_gateway_runtime, _create_head_runtime
def _create_worker_runtime(port, name='', executor=None):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_worker(port):
# create a single worker runtime
p = multiprocessing.Process(target=_create_worker_runtime, args=(port,))
p.start()
time.sleep(0.1)
return p
def _create_gateway(port, graph, pod_addr, protocol):
    # create a single gateway runtime
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph, pod_addr, port, protocol),
)
p.start()
time.sleep(0.1)
return p
def _create_head(port, connection_list_dict, polling='ANY'):
p = multiprocessing.Process(
target=_create_head_runtime, args=(port, connection_list_dict, 'head', polling)
)
p.start()
time.sleep(0.1)
return p
@pytest.mark.parametrize('runtime', ['head', 'worker', 'gateway'])
def test_jina_info_grpc_based_runtimes(runtime, port_generator):
port = port_generator()
connection_list_dict = {}
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
if runtime == 'head':
p = _create_head(port, connection_list_dict)
elif runtime == 'gateway':
p = _create_gateway(port, graph_description, pod_addresses, 'grpc')
else:
p = _create_worker(port)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
channel = grpc.insecure_channel(f'localhost:{port}')
stub = jina_pb2_grpc.JinaInfoRPCStub(channel)
res = stub._status(
jina_pb2.google_dot_protobuf_dot_empty__pb2.Empty(),
)
assert res.jina['jina'] == __version__
for env_var in __jina_env__:
assert env_var in res.envs
except Exception:
assert False
finally:
p.terminate()
p.join()
@pytest.mark.parametrize('protocol', ['http', 'websocket'])
def test_jina_info_gateway_http(protocol, port_generator):
port = port_generator()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
p = _create_gateway(port, graph_description, pod_addresses, protocol)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
x = requests.get(f'http://localhost:{port}/status')
resp = x.json()
assert 'jina' in resp
assert 'envs' in resp
assert resp['jina']['jina'] == __version__
for env_var in __jina_env__:
assert env_var in resp['envs']
except Exception:
assert False
finally:
p.terminate()
p.join()
|
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask
from .core import Booster, DataIter, DMatrix, QuantileDMatrix, _py_version, build_info
from .tracker import RabitTracker # noqa
from .training import cv, train
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"QuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# dask
"dask",
# collective
"collective",
]
|
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask
from .core import (
Booster,
DataIter,
DeviceQuantileDMatrix,
DMatrix,
QuantileDMatrix,
_py_version,
build_info,
)
from .tracker import RabitTracker # noqa
from .training import cv, train
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"DeviceQuantileDMatrix",
"QuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# dask
"dask",
# collective
"collective",
]
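# Illustrative training sketch using the names re-exported above (toy random data, for demonstration only).
import numpy as np

X = np.random.rand(256, 10)
y = np.random.randint(2, size=256)
dtrain = DMatrix(X, label=y)
booster = train({"objective": "binary:logistic", "max_depth": 3}, dtrain, num_boost_round=10)
preds = booster.predict(dtrain)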
|
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for all NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for all NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
print("Starting evaluation on all NanoBEIR datasets")
results = evaluator(model)
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# Print results for each dataset
for key, value in results.items():
if key.startswith("Nano"):
print(f"{key}: {value:.4f}")
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
NOTE: copy paste from mktestdocs.__main__ and add the keyword ignore
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
@pytest.mark.parametrize(
'fpath',
[
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
],
ids=str,
)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True)
def test_readme():
check_md_file(
fpath='README.md', memory=True, keyword_ignore=['tensorflow', 'fastapi', 'push']
)
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
NOTE: copy paste from mktestdocs.__main__ and add the keyword ignore
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
@pytest.mark.parametrize(
'fpath', pathlib.Path('docs/user_guide').glob('**/*.md'), ids=str
)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True)
def test_readme():
check_md_file(
fpath='README.md', memory=True, keyword_ignore=['tensorflow', 'fastapi', 'push']
)
|
from pathlib import Path
from typing import Any, Optional, TypedDict
from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
# traverse path for routes to host (any directory holding a pyproject.toml file)
package_root = Path.cwd() if cwd is None else cwd
visited: set[Path] = set()
while package_root not in visited:
visited.add(package_root)
pyproject_path = package_root / "pyproject.toml"
if pyproject_path.exists():
return package_root
package_root = package_root.parent
msg = "No pyproject.toml found"
raise FileNotFoundError(msg)
class LangServeExport(TypedDict):
"""Fields from pyproject.toml that are relevant to LangServe.
Attributes:
module: The module to import from, tool.langserve.export_module
attr: The attribute to import from the module, tool.langserve.export_attr
package_name: The name of the package, tool.poetry.name
"""
module: str
attr: str
package_name: str
def get_langserve_export(filepath: Path) -> LangServeExport:
with open(filepath) as f:
data: dict[str, Any] = load(f)
try:
module = data["tool"]["langserve"]["export_module"]
attr = data["tool"]["langserve"]["export_attr"]
package_name = data["tool"]["poetry"]["name"]
except KeyError as e:
msg = "Invalid LangServe PyProject.toml"
raise KeyError(msg) from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
|
from pathlib import Path
from typing import Any, Optional, TypedDict
from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
# traverse path for routes to host (any directory holding a pyproject.toml file)
package_root = Path.cwd() if cwd is None else cwd
visited: set[Path] = set()
while package_root not in visited:
visited.add(package_root)
pyproject_path = package_root / "pyproject.toml"
if pyproject_path.exists():
return package_root
package_root = package_root.parent
msg = "No pyproject.toml found"
raise FileNotFoundError(msg)
class LangServeExport(TypedDict):
"""Fields from pyproject.toml that are relevant to LangServe.
Attributes:
module: The module to import from, tool.langserve.export_module
attr: The attribute to import from the module, tool.langserve.export_attr
package_name: The name of the package, tool.poetry.name
"""
module: str
attr: str
package_name: str
def get_langserve_export(filepath: Path) -> LangServeExport:
with open(filepath) as f:
data: dict[str, Any] = load(f)
try:
module = data["tool"]["langserve"]["export_module"]
attr = data["tool"]["langserve"]["export_attr"]
package_name = data["tool"]["poetry"]["name"]
except KeyError as e:
msg = "Invalid LangServe PyProject.toml"
raise KeyError(msg) from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
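# Illustrative usage sketch: the pyproject.toml fields this helper reads and how it is called.
# The field values and the temporary file below are assumptions for demonstration only.
import tempfile

example_pyproject = """
[tool.poetry]
name = "my-app"

[tool.langserve]
export_module = "my_app.server"
export_attr = "chain"
"""

with tempfile.TemporaryDirectory() as tmp:
    pyproject_path = Path(tmp) / "pyproject.toml"
    pyproject_path.write_text(example_pyproject)
    export = get_langserve_export(pyproject_path)
    # -> {'module': 'my_app.server', 'attr': 'chain', 'package_name': 'my-app'}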
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
        It is called when a tensor is assigned to a field of this type,
        i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
Raises `ValueError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
@abc.abstractmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
...
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
        It is called when a tensor is assigned to a field of this type,
        i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
Raises `ValueError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
__name__ = f'{cls.__name__}[{shape_str}]'
__qualname__ = f'{cls.__qualname__}[{shape_str}]'
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
@abc.abstractmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
...
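# Illustrative sketch of the Tensor[shape] syntax this base class enables, using docarray's NdArray
# as the concrete subclass (assumed here for demonstration).
import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray

class ImageDoc(BaseDoc):
    # parametrized via __class_getitem__ -> _docarray_create_parametrized_type
    tensor: NdArray[3, 224, 224]

doc = ImageDoc(tensor=np.zeros((3, 224, 224)))   # matching shape: accepted as-is
doc = ImageDoc(tensor=np.zeros((224, 224, 3)))   # same element count: reshaped per the documented behaviour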
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
    Consider the case of resetting the current ``Registry`` by ``default_scope``
    in an internal module which cannot access the runner directly; it is difficult
    to get the ``default_scope`` defined in ``Runner``. However, if ``Runner``
    creates a ``DefaultScope`` instance with the given ``default_scope``, the internal
    module can get ``default_scope`` via ``DefaultScope.get_current_instance``
    everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
        Since default_scope is an optional argument for ``Registry.build``,
``get_current_instance`` should return ``None`` if there is no
``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
            Optional[DefaultScope]: Return None if no ``DefaultScope`` instance
                has been created yet, otherwise return the latest created
                DefaultScope instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super(DefaultScope, cls).get_current_instance()
else:
instance = None
_release_lock()
return instance
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
    Consider the case of resetting the current ``Registry`` by ``default_scope``
    in an internal module which cannot access the runner directly; it is difficult
    to get the ``default_scope`` defined in ``Runner``. However, if ``Runner``
    creates a ``DefaultScope`` instance with the given ``default_scope``, the internal
    module can get ``default_scope`` via ``DefaultScope.get_current_instance``
    everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
>>> # build model from cfg.
>>> model = MODELS.build(model_cfg, default_scope=scope_name)
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
        Since default_scope is an optional argument for ``Registry.build``,
``get_current_instance`` should return ``None`` if there is no
``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
            Optional[DefaultScope]: Return None if no ``DefaultScope`` instance
                has been created yet, otherwise return the latest created
                DefaultScope instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super(DefaultScope, cls).get_current_instance()
else:
instance = None
_release_lock()
return instance
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.prompts.base import (
BasePromptTemplate,
aformat_document,
format_document,
)
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
)
from langchain_core.prompts.few_shot_with_templates import (
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
check_valid_template,
get_template_variables,
jinja2_formatter,
validate_jinja2,
)
__all__ = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"format_document",
"aformat_document",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
]
_dynamic_imports = {
"BasePromptTemplate": "base",
"format_document": "base",
"aformat_document": "base",
"AIMessagePromptTemplate": "chat",
"BaseChatPromptTemplate": "chat",
"ChatMessagePromptTemplate": "chat",
"ChatPromptTemplate": "chat",
"HumanMessagePromptTemplate": "chat",
"MessagesPlaceholder": "chat",
"SystemMessagePromptTemplate": "chat",
"FewShotChatMessagePromptTemplate": "few_shot",
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",
"get_template_variables": "string",
"jinja2_formatter": "string",
"validate_jinja2": "string",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
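# Illustrative usage sketch for the prompts package above (an editor-added
# example, not part of the original file). A minimal example assuming
# `langchain_core` is installed; the template strings are made up.
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate

# String template with one input variable.
prompt = PromptTemplate.from_template("Summarize the following text:\n{text}")
print(prompt.format(text="Prompt templates compose prompt inputs."))

# Chat template built from (role, template) pairs.
chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a concise assistant."),
        ("human", "{question}"),
    ]
)
print(chat_prompt.format_messages(question="What is a prompt template?"))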
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.prompts.base import (
BasePromptTemplate,
aformat_document,
format_document,
)
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
)
from langchain_core.prompts.few_shot_with_templates import (
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
check_valid_template,
get_template_variables,
jinja2_formatter,
validate_jinja2,
)
__all__ = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"format_document",
"aformat_document",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
]
_dynamic_imports = {
"BasePromptTemplate": "base",
"format_document": "base",
"aformat_document": "base",
"AIMessagePromptTemplate": "chat",
"BaseChatPromptTemplate": "chat",
"ChatMessagePromptTemplate": "chat",
"ChatPromptTemplate": "chat",
"HumanMessagePromptTemplate": "chat",
"MessagesPlaceholder": "chat",
"SystemMessagePromptTemplate": "chat",
"FewShotChatMessagePromptTemplate": "few_shot",
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",
"get_template_variables": "string",
"jinja2_formatter": "string",
"validate_jinja2": "string",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain.agents.output_parsers.tools import ToolAgentAction
def _create_tool_message(
agent_action: ToolAgentAction,
observation: str,
) -> ToolMessage:
"""Convert agent action and observation into a tool message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
ToolMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return ToolMessage(
tool_call_id=agent_action.tool_call_id,
content=content,
additional_kwargs={"name": agent_action.tool},
)
def format_to_tool_messages(
intermediate_steps: Sequence[tuple[AgentAction, str]],
) -> list[BaseMessage]:
"""Convert (AgentAction, tool output) tuples into ToolMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations.
Returns:
list of messages to send to the LLM for the next prediction.
"""
messages = []
for agent_action, observation in intermediate_steps:
if isinstance(agent_action, ToolAgentAction):
new_messages = [
*list(agent_action.message_log),
_create_tool_message(agent_action, observation),
]
messages.extend([new for new in new_messages if new not in messages])
else:
messages.append(AIMessage(content=agent_action.log))
return messages
|
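# Illustrative usage sketch for format_to_tool_messages above (an editor-added
# example, not part of the original file). The tool name, arguments and output
# are made up; the import path assumes this module lives at
# `langchain.agents.format_scratchpad.tools`.
from langchain_core.messages import AIMessage
from langchain.agents.format_scratchpad.tools import format_to_tool_messages
from langchain.agents.output_parsers.tools import ToolAgentAction

action = ToolAgentAction(
    tool="search",
    tool_input={"query": "weather in Paris"},
    log="Invoking `search` with {'query': 'weather in Paris'}",
    message_log=[AIMessage(content="Calling the search tool.")],
    tool_call_id="call_1",
)
# Each intermediate step pairs the agent action with the raw tool output.
for message in format_to_tool_messages([(action, "Sunny, 21 degrees")]):
    print(type(message).__name__, message.content)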
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain.agents.output_parsers.tools import ToolAgentAction
def _create_tool_message(
agent_action: ToolAgentAction, observation: str
) -> ToolMessage:
"""Convert agent action and observation into a tool message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
ToolMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return ToolMessage(
tool_call_id=agent_action.tool_call_id,
content=content,
additional_kwargs={"name": agent_action.tool},
)
def format_to_tool_messages(
intermediate_steps: Sequence[tuple[AgentAction, str]],
) -> list[BaseMessage]:
"""Convert (AgentAction, tool output) tuples into ToolMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations.
Returns:
list of messages to send to the LLM for the next prediction.
"""
messages = []
for agent_action, observation in intermediate_steps:
if isinstance(agent_action, ToolAgentAction):
new_messages = [
*list(agent_action.message_log),
_create_tool_message(agent_action, observation),
]
messages.extend([new for new in new_messages if new not in messages])
else:
messages.append(AIMessage(content=agent_action.log))
return messages
|
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
VERSION = get_version("keras/src/version.py")
setup(
name="keras",
description="Multi-backend Keras.",
long_description_content_type="text/markdown",
long_description=README,
version=VERSION,
url="https://github.com/keras-team/keras",
author="Keras team",
author_email="[email protected]",
license="Apache License 2.0",
install_requires=[
"absl-py",
"numpy",
"rich",
"namex",
"h5py",
"optree",
"ml-dtypes",
"packaging",
],
# Supported Python versions
python_requires=">=3.9",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Unix",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*_test.py",)),
)
|
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
VERSION = get_version("keras/src/version.py")
setup(
name="keras",
description="Multi-backend Keras.",
long_description_content_type="text/markdown",
long_description=README,
version=VERSION,
url="https://github.com/keras-team/keras",
author="Keras team",
author_email="[email protected]",
license="Apache License 2.0",
install_requires=[
"absl-py",
"numpy",
"rich",
"namex",
"h5py",
"optree",
"ml-dtypes",
],
# Supported Python versions
python_requires=">=3.9",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Unix",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*_test.py",)),
)
|
import torchaudio
_STREAM_READER = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
torchaudio._extension._init_ffmpeg()
if name in _STREAM_READER:
from . import _stream_reader
item = getattr(_stream_reader, name)
else:
from . import _stream_writer
item = getattr(_stream_writer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
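# Editor-added note on the lazy-import pattern above (not part of the original
# file): the module-level __getattr__ (PEP 562) defers loading the FFmpeg
# extension until one of the stream classes is actually accessed. The import
# path below assumes this file is `torchaudio/io/__init__.py`.
import torchaudio.io

# Listing the module contents does not initialize FFmpeg...
print(dir(torchaudio.io))
# ...but attribute access does, so this line requires an FFmpeg-enabled build:
# StreamReader = torchaudio.io.StreamReader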
import torchaudio
_LAZILY_IMPORTED = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
torchaudio._extension._init_ffmpeg()
from . import _stream_reader
item = getattr(_stream_reader, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
import json
from typing import (
Any,
Union,
)
from langchain_core._api import deprecated
from pydantic import PrivateAttr
from langchain_anthropic.chat_models import ChatAnthropic
SYSTEM_PROMPT_FORMAT = """In this environment you have access to a set of tools you can use to answer the user's question.
You may call them like this:
<function_calls>
<invoke>
<tool_name>$TOOL_NAME</tool_name>
<parameters>
<$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
...
</parameters>
</invoke>
</function_calls>
Here are the tools available:
<tools>
{formatted_tools}
</tools>""" # noqa: E501
TOOL_FORMAT = """<tool_description>
<tool_name>{tool_name}</tool_name>
<description>{tool_description}</description>
<parameters>
{formatted_parameters}
</parameters>
</tool_description>"""
TOOL_PARAMETER_FORMAT = """<parameter>
<name>{parameter_name}</name>
<type>{parameter_type}</type>
<description>{parameter_description}</description>
</parameter>"""
def _get_type(parameter: dict[str, Any]) -> str:
if "type" in parameter:
return parameter["type"]
if "anyOf" in parameter:
return json.dumps({"anyOf": parameter["anyOf"]})
if "allOf" in parameter:
return json.dumps({"allOf": parameter["allOf"]})
return json.dumps(parameter)
def get_system_message(tools: list[dict]) -> str:
"""Generate a system message that describes the available tools."""
tools_data: list[dict] = [
{
"tool_name": tool["name"],
"tool_description": tool["description"],
"formatted_parameters": "\n".join(
[
TOOL_PARAMETER_FORMAT.format(
parameter_name=name,
parameter_type=_get_type(parameter),
parameter_description=parameter.get("description"),
)
for name, parameter in tool["parameters"]["properties"].items()
]
),
}
for tool in tools
]
tools_formatted = "\n".join(
[
TOOL_FORMAT.format(
tool_name=tool["tool_name"],
tool_description=tool["tool_description"],
formatted_parameters=tool["formatted_parameters"],
)
for tool in tools_data
]
)
return SYSTEM_PROMPT_FORMAT.format(formatted_tools=tools_formatted)
def _xml_to_dict(t: Any) -> Union[str, dict[str, Any]]:
# Base case: If the element has no children, return its text or an empty string.
if len(t) == 0:
return t.text or ""
# Recursive case: The element has children. Convert them into a dictionary.
d: dict[str, Any] = {}
for child in t:
if child.tag not in d:
d[child.tag] = _xml_to_dict(child)
else:
# Handle multiple children with the same tag
if not isinstance(d[child.tag], list):
d[child.tag] = [d[child.tag]] # Convert existing entry into a list
d[child.tag].append(_xml_to_dict(child))
return d
def _xml_to_function_call(invoke: Any, tools: list[dict]) -> dict[str, Any]:
name = invoke.find("tool_name").text
arguments = _xml_to_dict(invoke.find("parameters"))
# make list elements in arguments actually lists
filtered_tools = [tool for tool in tools if tool["name"] == name]
if len(filtered_tools) > 0 and not isinstance(arguments, str):
tool = filtered_tools[0]
for key, value in arguments.items():
if key in tool["parameters"]["properties"]:
if "type" in tool["parameters"]["properties"][key]:
if tool["parameters"]["properties"][key][
"type"
] == "array" and not isinstance(value, list):
arguments[key] = [value]
if (
tool["parameters"]["properties"][key]["type"] != "object"
and isinstance(value, dict)
and len(value.keys()) == 1
):
arguments[key] = list(value.values())[0]
return {
"function": {
"name": name,
"arguments": json.dumps(arguments),
},
"type": "function",
}
def _xml_to_tool_calls(elem: Any, tools: list[dict]) -> list[dict[str, Any]]:
"""
    Convert the ``invoke`` children of an XML element into a list of tool-call dictionaries.
"""
invokes = elem.findall("invoke")
return [_xml_to_function_call(invoke, tools) for invoke in invokes]
@deprecated(
"0.1.5",
removal="1.0.0",
alternative="ChatAnthropic",
message=(
"Tool-calling is now officially supported by the Anthropic API so this "
"workaround is no longer needed."
),
)
class ChatAnthropicTools(ChatAnthropic):
"""Chat model for interacting with Anthropic functions."""
_xmllib: Any = PrivateAttr(default=None)
|
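# Illustrative usage sketch for get_system_message above (an editor-added
# example, not part of the original file). The tool schema is made up and the
# import path assumes this module is `langchain_anthropic.experimental`.
from langchain_anthropic.experimental import get_system_message

tools = [
    {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "properties": {
                "city": {"type": "string", "description": "City name"},
            },
        },
    },
]
# Renders the XML <tools> description that is prepended as a system prompt.
print(get_system_message(tools))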
import json
from typing import (
Any,
Dict,
List,
Union,
)
from langchain_core._api import deprecated
from pydantic import PrivateAttr
from langchain_anthropic.chat_models import ChatAnthropic
SYSTEM_PROMPT_FORMAT = """In this environment you have access to a set of tools you can use to answer the user's question.
You may call them like this:
<function_calls>
<invoke>
<tool_name>$TOOL_NAME</tool_name>
<parameters>
<$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
...
</parameters>
</invoke>
</function_calls>
Here are the tools available:
<tools>
{formatted_tools}
</tools>""" # noqa: E501
TOOL_FORMAT = """<tool_description>
<tool_name>{tool_name}</tool_name>
<description>{tool_description}</description>
<parameters>
{formatted_parameters}
</parameters>
</tool_description>"""
TOOL_PARAMETER_FORMAT = """<parameter>
<name>{parameter_name}</name>
<type>{parameter_type}</type>
<description>{parameter_description}</description>
</parameter>"""
def _get_type(parameter: Dict[str, Any]) -> str:
if "type" in parameter:
return parameter["type"]
if "anyOf" in parameter:
return json.dumps({"anyOf": parameter["anyOf"]})
if "allOf" in parameter:
return json.dumps({"allOf": parameter["allOf"]})
return json.dumps(parameter)
def get_system_message(tools: List[Dict]) -> str:
"""Generate a system message that describes the available tools."""
tools_data: List[Dict] = [
{
"tool_name": tool["name"],
"tool_description": tool["description"],
"formatted_parameters": "\n".join(
[
TOOL_PARAMETER_FORMAT.format(
parameter_name=name,
parameter_type=_get_type(parameter),
parameter_description=parameter.get("description"),
)
for name, parameter in tool["parameters"]["properties"].items()
]
),
}
for tool in tools
]
tools_formatted = "\n".join(
[
TOOL_FORMAT.format(
tool_name=tool["tool_name"],
tool_description=tool["tool_description"],
formatted_parameters=tool["formatted_parameters"],
)
for tool in tools_data
]
)
return SYSTEM_PROMPT_FORMAT.format(formatted_tools=tools_formatted)
def _xml_to_dict(t: Any) -> Union[str, Dict[str, Any]]:
# Base case: If the element has no children, return its text or an empty string.
if len(t) == 0:
return t.text or ""
# Recursive case: The element has children. Convert them into a dictionary.
d: Dict[str, Any] = {}
for child in t:
if child.tag not in d:
d[child.tag] = _xml_to_dict(child)
else:
# Handle multiple children with the same tag
if not isinstance(d[child.tag], list):
d[child.tag] = [d[child.tag]] # Convert existing entry into a list
d[child.tag].append(_xml_to_dict(child))
return d
def _xml_to_function_call(invoke: Any, tools: List[Dict]) -> Dict[str, Any]:
name = invoke.find("tool_name").text
arguments = _xml_to_dict(invoke.find("parameters"))
# make list elements in arguments actually lists
filtered_tools = [tool for tool in tools if tool["name"] == name]
if len(filtered_tools) > 0 and not isinstance(arguments, str):
tool = filtered_tools[0]
for key, value in arguments.items():
if key in tool["parameters"]["properties"]:
if "type" in tool["parameters"]["properties"][key]:
if tool["parameters"]["properties"][key][
"type"
] == "array" and not isinstance(value, list):
arguments[key] = [value]
if (
tool["parameters"]["properties"][key]["type"] != "object"
and isinstance(value, dict)
and len(value.keys()) == 1
):
arguments[key] = list(value.values())[0]
return {
"function": {
"name": name,
"arguments": json.dumps(arguments),
},
"type": "function",
}
def _xml_to_tool_calls(elem: Any, tools: List[Dict]) -> List[Dict[str, Any]]:
"""
    Convert the ``invoke`` children of an XML element into a list of tool-call dictionaries.
"""
invokes = elem.findall("invoke")
return [_xml_to_function_call(invoke, tools) for invoke in invokes]
@deprecated(
"0.1.5",
removal="1.0.0",
alternative="ChatAnthropic",
message=(
"Tool-calling is now officially supported by the Anthropic API so this "
"workaround is no longer needed."
),
)
class ChatAnthropicTools(ChatAnthropic):
"""Chat model for interacting with Anthropic functions."""
_xmllib: Any = PrivateAttr(default=None)
|
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "int8", "binary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using usearch
rescore_multiplier = 4
results, search_time = semantic_search_usearch(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "int8", "binary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using usearch
rescore_multiplier = 4
results, search_time = semantic_search_usearch(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
from collections import defaultdict
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
else:
import transforms
return transforms, None, None
class SegmentationPresetTrain:
def __init__(
self,
*,
base_size,
crop_size,
hflip_prob=0.5,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
backend="pil",
use_v2=False,
):
T, datapoints, v2_extras = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImageTensor())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]
if hflip_prob > 0:
transforms += [T.RandomHorizontalFlip(hflip_prob)]
if use_v2:
# We need a custom pad transform here, since the padding we want to perform here is fundamentally
# different from the padding in `RandomCrop` if `pad_if_needed=True`.
transforms += [v2_extras.PadIfSmaller(crop_size, fill=defaultdict(lambda: 0, {datapoints.Mask: 255}))]
transforms += [T.RandomCrop(crop_size)]
if backend == "pil":
transforms += [T.PILToTensor()]
if use_v2:
img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
transforms += [
T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
]
else:
# No need to explicitly convert masks as they're magically int64 already
transforms += [T.ConvertImageDtype(torch.float)]
transforms += [T.Normalize(mean=mean, std=std)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class SegmentationPresetEval:
def __init__(
self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
):
T, _, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImageTensor()]
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if use_v2:
transforms += [T.Resize(size=(base_size, base_size))]
else:
transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
transforms += [
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
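# Illustrative usage sketch for the presets above (an editor-added example,
# not part of the original file). It assumes the module is importable as
# `presets` and that the local `transforms` helpers from the same references
# folder are on the path; the sizes are placeholders.
from PIL import Image
from presets import SegmentationPresetTrain

transform = SegmentationPresetTrain(base_size=520, crop_size=480)
img = Image.new("RGB", (640, 480))
target = Image.new("L", (640, 480))
img_t, target_t = transform(img, target)
print(img_t.shape, target_t.shape)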
import torch
import transforms as T
class SegmentationPresetTrain:
def __init__(self, *, base_size, crop_size, hflip_prob=0.5, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
min_size = int(0.5 * base_size)
max_size = int(2.0 * base_size)
trans = [T.RandomResize(min_size, max_size)]
if hflip_prob > 0:
trans.append(T.RandomHorizontalFlip(hflip_prob))
trans.extend(
[
T.RandomCrop(crop_size),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
)
self.transforms = T.Compose(trans)
def __call__(self, img, target):
return self.transforms(img, target)
class SegmentationPresetEval:
def __init__(self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
self.transforms = T.Compose(
[
T.RandomResize(base_size, base_size),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
)
def __call__(self, img, target):
return self.transforms(img, target)
|
import logging
import os
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def get_trainer(args):
seed_everything(1)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(args.exp_dir, args.experiment_name) if args.exp_dir else None,
monitor="monitoring_step",
mode="max",
save_last=True,
filename="{{epoch}}",
save_top_k=10,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
lr_monitor,
]
return Trainer(
sync_batchnorm=True,
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
resume_from_checkpoint=args.resume_from_checkpoint,
)
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.md == "av":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
return model
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--md",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--dataset-path",
type=str,
help="Path to LRW audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument(
"--exp-dir",
type=str,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--experiment-name",
default="online_avsr_public_test",
type=str,
help="Experiment name",
)
parser.add_argument(
"--num-nodes",
default=8,
type=int,
help="Number of nodes to use for training. (Default: 8)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=55,
type=int,
help="Number of epochs to train for. (Default: 55)",
)
parser.add_argument(
"--resume-from-checkpoint", default=None, type=str, help="Path to the checkpoint to resume from"
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
trainer = get_trainer(args)
trainer.fit(model, data_module)
ensemble(args)
if __name__ == "__main__":
cli_main()
|
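# Editor-added invocation sketch for the training script above (not part of
# the original file). The script name, dataset path and SentencePiece model
# path are placeholders; node/GPU counts depend on the available hardware.
#
#   python train.py \
#       --md av \
#       --mode online \
#       --dataset-path /path/to/lrw \
#       --sp-model-path /path/to/sp_model.model \
#       --exp-dir ./exp \
#       --num-nodes 1 \
#       --gpus 2 \
#       --epochs 55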
import logging
import os
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def get_trainer(args):
seed_everything(1)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(args.exp_dir, args.experiment_name) if args.exp_dir else None,
monitor="monitoring_step",
mode="max",
save_last=True,
filename=f"{{epoch}}",
save_top_k=10,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
lr_monitor,
]
return Trainer(
sync_batchnorm=True,
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
resume_from_checkpoint=args.resume_from_checkpoint,
)
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.md == "av":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
return model
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--md",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--dataset-path",
type=str,
help="Path to LRW audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument(
"--exp-dir",
type=str,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--experiment-name",
default="online_avsr_public_test",
type=str,
help="Experiment name",
)
parser.add_argument(
"--num-nodes",
default=8,
type=int,
help="Number of nodes to use for training. (Default: 8)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=55,
type=int,
help="Number of epochs to train for. (Default: 55)",
)
parser.add_argument(
"--resume-from-checkpoint", default=None, type=str, help="Path to the checkpoint to resume from"
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
trainer = get_trainer(args)
trainer.fit(model, data_module)
ensemble(args)
if __name__ == "__main__":
cli_main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .multi_instance_bbox_head import MultiInstanceBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
'SCNetBBoxHead', 'MultiInstanceBBoxHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
'SCNetBBoxHead'
]
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
silu,
gelu,
glu,
tanh,
tanh_shrink,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
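# Illustrative usage sketch for the activations registry above (an editor-added
# example, not part of the original file). A minimal example assuming the
# public `keras` package exposes these functions as `keras.activations.*`.
import numpy as np
import keras

# `get` accepts a name, a dict config, a callable, or None (which maps to linear).
relu_fn = keras.activations.get("relu")
print(relu_fn(np.array([-1.0, 0.0, 2.0])))

# Built-in activations serialize to their plain name and round-trip back.
config = keras.activations.serialize(relu_fn)
print(config)  # 'relu'
print(keras.activations.deserialize(config).__name__)  # 'relu'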
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
soft_shrink,
silu,
gelu,
glu,
tanh,
tanh_shrink,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DAWs are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses weaviate as storage
:return: the length of this :class:`DocumentArrayWeaviate` object
"""
cls_data = (
self._client.query.aggregate(self._class_name)
.with_meta_count()
.do()
.get('data', {})
.get('Aggregate', {})
.get(self._class_name, [])
)
if not cls_data:
return 0
return cls_data[0]['meta']['count']
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with weaviate storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._client.data_object.exists(self._map_id(x))
elif isinstance(x, Document):
return self._client.data_object.exists(self._map_id(x.id))
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayWeaviate` object"""
super().__del__()
if (
not self._persist
and len(_REGISTRY[self.__class__.__name__][self._class_name]) == 1
):
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
_REGISTRY[self.__class__.__name__][self._class_name].remove(self)
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayWeaviate` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def extend(self, values: Iterable['Document']) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
with self._client.batch(batch_size=50) as _b:
for d in values:
_b.add_data_object(**self._doc2weaviate_create_payload(d))
self._offset2ids.append(d.id)
|
from typing import Union, Iterable
from ..base.seqlike import BaseSequenceLikeMixin
from ..registry import _REGISTRY
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DAWs are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses weaviate as storage
:return: the length of this :class:`DocumentArrayWeaviate` object
"""
cls_data = (
self._client.query.aggregate(self._class_name)
.with_meta_count()
.do()
.get('data', {})
.get('Aggregate', {})
.get(self._class_name, [])
)
if not cls_data:
return 0
return cls_data[0]['meta']['count']
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with weaviate storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._client.data_object.exists(self._map_id(x))
elif isinstance(x, Document):
return self._client.data_object.exists(self._map_id(x.id))
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayWeaviate` object"""
super().__del__()
if (
not self._persist
and len(_REGISTRY[self.__class__.__name__][self._class_name]) == 1
):
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
_REGISTRY[self.__class__.__name__][self._class_name].remove(self)
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayWeaviate` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def extend(self, values: Iterable['Document']) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
with self._client.batch(batch_size=50) as _b:
for d in values:
_b.add_data_object(**self._doc2weaviate_create_payload(d))
self._offset2ids.append(d.id)
|
from . import InputExample
import csv
import gzip
import os
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expects a tab separated file with the first & second column the sentence pair and third column the score (0...1). Default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
filepath, encoding="utf-8"
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
|
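# Illustrative usage sketch for the readers above (an editor-added example,
# not part of the original file). The dataset folder and file name are
# placeholders; the import path assumes `sentence_transformers.readers`.
from sentence_transformers.readers import STSBenchmarkDataReader

reader = STSBenchmarkDataReader("datasets/stsbenchmark")
examples = reader.get_examples("sts-train.csv", max_examples=5)
for example in examples:
    print(example.texts, example.label)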
from . import InputExample
import csv
import gzip
import os
class STSDataReader:
"""
    Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx).
    The default values expect a tab-separated file with the sentence pair in the first and second columns and the score (0...1) in the third column. The default config normalizes scores from 0...5 to 0...1.
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""
        filename specifies which data split to use (train.csv, dev.csv, test.csv).
"""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
filepath, encoding="utf-8"
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""
    Reader especially for the STS benchmark dataset. There, the sentences are in columns 5 and 6, and the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
|
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ClickupToolkit": "langchain_community.agent_toolkits.clickup.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ClickupToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ClickupToolkit": "langchain_community.agent_toolkits.clickup.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ClickupToolkit",
]
|
"""Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend.tensorflow import trainer as tf_trainer
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
class DistributeTest(testing.TestCase):
def setUp(self):
super().setUp()
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
def test_variable_creation(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
dense = layers.Dense(2)
dense.build([4, 2])
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
self.assertIn("MirroredVariable", dense.kernel.value.__class__.__name__)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(dense.bias.value, tf.distribute.DistributedValues)
self.assertIn("MirroredVariable", dense.bias.value.__class__.__name__)
def test_strategy_run(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
inputs = layers.Input(shape=[4])
dense = layers.Dense(2)
output = dense(inputs)
model = models.Functional(inputs, output)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
def input_fn(ctx):
if ctx.replica_id_in_sync_group == 1:
return tf.ones([8, 4])
else:
return tf.zeros([8, 4])
distributed_inputs = (
strategy.experimental_distribute_values_from_function(input_fn)
)
@tf.function
def run_fn(data):
return model(data)
result = strategy.run(run_fn, args=(distributed_inputs,))
self.assertIsInstance(
result, tf.types.experimental.distributed.PerReplica
)
self.assertLen(result.values, 2)
self.assertEqual(result.values[0].shape, [8, 2])
self.assertEqual(result.values[1].shape, [8, 2])
self.assertNotAllClose(result.values[0], result.values[1])
self.assertAllClose(result.values[0], tf.zeros([8, 2]))
def test_epoch_iterator(self):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
sample_weight = np.random.random((100,))
batch_size = 16
shuffle = True
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
epoch_iterator = tf_trainer.TFEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
shuffle=shuffle,
distribute_strategy=strategy,
)
steps_seen = []
for step, data_iterator in epoch_iterator:
steps_seen.append(step)
batch = next(data_iterator)
self.assertEqual(len(batch), 3)
x, y, sample_weight = batch
self.assertTrue(
isinstance(x, tf.types.experimental.distributed.PerReplica)
)
# Make sure the local batch size is 8
if step < 6:
self.assertEqual(x.values[0].shape, [8, 16])
self.assertEqual(y.values[0].shape, [8, 4])
self.assertEqual(sample_weight.values[0].shape, [8])
else:
# Last partial batch
self.assertEqual(x.values[0].shape, [2, 16])
self.assertEqual(y.values[0].shape, [2, 4])
self.assertEqual(sample_weight.values[0].shape, [2])
self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
def test_variable_aggregation(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
x = np.random.random((4, 4))
v1 = backend.Variable(x, dtype="float32")
self.assertEqual(v1.aggregation, "mean")
self.assertEqual(v1.value.aggregation, tf.VariableAggregation.MEAN)
v2 = backend.Variable(x, dtype="float32", aggregation="sum")
self.assertEqual(v2.aggregation, "sum")
self.assertEqual(v2.value.aggregation, tf.VariableAggregation.SUM)
|
"""Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend.tensorflow import trainer as tf_trainer
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
class DistributeTest(testing.TestCase):
def setUp(self):
super().setUp()
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
def test_variable_creation(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
dense = layers.Dense(2)
dense.build([4, 2])
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
self.assertIn("MirroredVariable", dense.kernel.value.__class__.__name__)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(dense.bias.value, tf.distribute.DistributedValues)
self.assertIn("MirroredVariable", dense.bias.value.__class__.__name__)
def test_strategy_run(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
inputs = layers.Input(shape=[4])
dense = layers.Dense(2)
output = dense(inputs)
model = models.Functional(inputs, output)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
def input_fn(ctx):
if ctx.replica_id_in_sync_group == 1:
return tf.ones([8, 4])
else:
return tf.zeros([8, 4])
distributed_inputs = (
strategy.experimental_distribute_values_from_function(input_fn)
)
@tf.function
def run_fn(data):
return model(data)
result = strategy.run(run_fn, args=(distributed_inputs,))
self.assertIsInstance(
result, tf.types.experimental.distributed.PerReplica
)
self.assertLen(result.values, 2)
self.assertEqual(result.values[0].shape, [8, 2])
self.assertEqual(result.values[1].shape, [8, 2])
self.assertNotAllClose(result.values[0], result.values[1])
self.assertAllClose(result.values[0], tf.zeros([8, 2]))
def test_epoch_iterator(self):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
sample_weight = np.random.random((100,))
batch_size = 16
shuffle = True
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
epoch_iterator = tf_trainer.TFEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
shuffle=shuffle,
distribute_strategy=strategy,
)
steps_seen = []
for step, data_iterator in epoch_iterator:
steps_seen.append(step)
batch = next(data_iterator)
self.assertEqual(len(batch), 3)
x, y, sample_weight = batch
self.assertTrue(
isinstance(x, tf.types.experimental.distributed.PerReplica)
)
# Make sure the local batch size is 8
if step < 6:
self.assertEqual(x.values[0].shape, [8, 16])
self.assertEqual(y.values[0].shape, [8, 4])
self.assertEqual(sample_weight.values[0].shape, [8])
else:
# Last partial batch
self.assertEqual(x.values[0].shape, [2, 16])
self.assertEqual(y.values[0].shape, [2, 4])
self.assertEqual(sample_weight.values[0].shape, [2])
self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError, not IndexError, when "Evaluator" is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
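# Hedged sketch of a custom evaluator built on the base class above. The sentences,
# metric name and class name are illustrative; only the SentenceEvaluator contract
# (__call__, primary_metric, prefix_name_to_metrics, store_metrics_in_model_card_data)
# comes from the code shown here.
import numpy as np
class MeanNormEvaluator(SentenceEvaluator):
    """Toy evaluator: reports the mean L2 norm of the embeddings of a few sentences."""
    def __init__(self, sentences: list[str], name: str = ""):
        super().__init__()
        self.sentences = sentences
        self.name = name
        self.primary_metric = "mean_norm"
    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1):
        embeddings = model.encode(self.sentences)
        metrics = {"mean_norm": float(np.linalg.norm(embeddings, axis=1).mean())}
        metrics = self.prefix_name_to_metrics(metrics, self.name)
        self.store_metrics_in_model_card_data(model, metrics, epoch, steps)
        return metrics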
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError, not IndexError, when "Evaluator" is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device
enable_full_determinism()
class DDPMPipelineFastTests(unittest.TestCase):
@property
def dummy_uncond_unet(self):
torch.manual_seed(0)
model = UNet2DModel(
block_out_channels=(4, 8),
layers_per_block=1,
norm_num_groups=4,
sample_size=8,
in_channels=3,
out_channels=3,
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
return model
def test_fast_inference(self):
device = "cpu"
unet = self.dummy_uncond_unet
scheduler = DDPMScheduler()
ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
ddpm.to(device)
ddpm.set_progress_bar_config(disable=None)
generator = torch.Generator(device=device).manual_seed(0)
image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images
generator = torch.Generator(device=device).manual_seed(0)
image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="np", return_dict=False)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 8, 8, 3)
expected_slice = np.array([0.0, 0.9996672, 0.00329116, 1.0, 0.9995991, 1.0, 0.0060907, 0.00115037, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_inference_predict_sample(self):
unet = self.dummy_uncond_unet
scheduler = DDPMScheduler(prediction_type="sample")
ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
ddpm.to(torch_device)
ddpm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images
generator = torch.manual_seed(0)
image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="np")[0]
image_slice = image[0, -3:, -3:, -1]
image_eps_slice = image_eps[0, -3:, -3:, -1]
assert image.shape == (1, 8, 8, 3)
tolerance = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - image_eps_slice.flatten()).max() < tolerance
@slow
@require_torch_accelerator
class DDPMPipelineIntegrationTests(unittest.TestCase):
def test_inference_cifar10(self):
model_id = "google/ddpm-cifar10-32"
unet = UNet2DModel.from_pretrained(model_id)
scheduler = DDPMScheduler.from_pretrained(model_id)
ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
ddpm.to(torch_device)
ddpm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ddpm(generator=generator, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4200, 0.3588, 0.1939, 0.3847, 0.3382, 0.2647, 0.4155, 0.3582, 0.3385])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device
enable_full_determinism()
class DDPMPipelineFastTests(unittest.TestCase):
@property
def dummy_uncond_unet(self):
torch.manual_seed(0)
model = UNet2DModel(
block_out_channels=(4, 8),
layers_per_block=1,
norm_num_groups=4,
sample_size=8,
in_channels=3,
out_channels=3,
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
return model
def test_fast_inference(self):
device = "cpu"
unet = self.dummy_uncond_unet
scheduler = DDPMScheduler()
ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
ddpm.to(device)
ddpm.set_progress_bar_config(disable=None)
generator = torch.Generator(device=device).manual_seed(0)
image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images
generator = torch.Generator(device=device).manual_seed(0)
image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="np", return_dict=False)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 8, 8, 3)
expected_slice = np.array([0.0, 0.9996672, 0.00329116, 1.0, 0.9995991, 1.0, 0.0060907, 0.00115037, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_inference_predict_sample(self):
unet = self.dummy_uncond_unet
scheduler = DDPMScheduler(prediction_type="sample")
ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
ddpm.to(torch_device)
ddpm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images
generator = torch.manual_seed(0)
image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="np")[0]
image_slice = image[0, -3:, -3:, -1]
image_eps_slice = image_eps[0, -3:, -3:, -1]
assert image.shape == (1, 8, 8, 3)
tolerance = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - image_eps_slice.flatten()).max() < tolerance
@slow
@require_torch_accelerator
class DDPMPipelineIntegrationTests(unittest.TestCase):
def test_inference_cifar10(self):
model_id = "google/ddpm-cifar10-32"
unet = UNet2DModel.from_pretrained(model_id)
scheduler = DDPMScheduler.from_pretrained(model_id)
ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
ddpm.to(torch_device)
ddpm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ddpm(generator=generator, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4200, 0.3588, 0.1939, 0.3847, 0.3382, 0.2647, 0.4155, 0.3582, 0.3385])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def mixin_hub_usage_parser(parser):
"""Add the arguments for hub pull to the parser
:param parser: the parser configure
"""
parser.add_argument(
'--no-usage',
action='store_true',
default=False,
help='If set, Hub executor usage will not be printed.',
)
def set_hub_push_parser(parser=None):
"""Set the parser for the hub push
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from jina.parsers.hubble.push import mixin_hub_push_parser
mixin_hub_usage_parser(parser)
mixin_hub_push_parser(parser)
return parser
def set_hub_pull_parser(parser=None):
"""Set the parser for the hub pull
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from jina.parsers.hubble.pull import mixin_hub_pull_parser
mixin_hub_usage_parser(parser)
mixin_hub_pull_parser(parser)
return parser
def set_hub_new_parser(parser=None):
"""Set the parser for the hub new
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from jina.parsers.hubble.new import mixin_hub_new_parser
mixin_hub_new_parser(parser)
return parser
def set_hub_status_parser(parser=None):
"""Set the parser for the hub status
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from jina.parsers.hubble.status import mixin_hub_status_parser
mixin_hub_status_parser(parser)
return parser
def set_hub_parser(parser=None):
"""Set the parser for the hub
    :param parser: the parser to configure
:return: the parser
"""
if not parser:
parser = set_base_parser()
spp = parser.add_subparsers(
dest='hub',
description='use `%(prog)-8s [sub-command] --help` '
'to get detailed information about each sub-command',
required=True,
)
set_hub_new_parser(
spp.add_parser(
'new',
help='create a new executor using the template',
description='Create a new executor using the template',
formatter_class=_chf,
)
)
set_hub_push_parser(
spp.add_parser(
'push',
help='push an executor package to Jina hub',
description='Push an executor package to Jina hub',
formatter_class=_chf,
)
)
set_hub_pull_parser(
spp.add_parser(
'pull',
help='download an executor image/package from Jina hub',
description='Download an executor image/package from Jina hub',
formatter_class=_chf,
)
)
set_hub_status_parser(
spp.add_parser(
'status',
            help='query the building status of a pushed Executor from Jina hub',
            description='Query the building status of a pushed Executor from Jina hub',
formatter_class=_chf,
)
)
return parser
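# Hedged usage sketch: build the combined hub parser and list its sub-commands.
# No Jina-specific arguments are assumed beyond the sub-parsers registered above.
if __name__ == '__main__':
    hub_parser = set_hub_parser()
    hub_parser.print_help()  # lists the 'new', 'push', 'pull' and 'status' sub-commands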
|
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def mixin_hub_usage_parser(parser):
"""Add the arguments for hub pull to the parser
:param parser: the parser configure
"""
parser.add_argument(
'--no-usage',
action='store_true',
default=False,
help='If set, Hub executor usage will not be printed.',
)
def set_hub_push_parser(parser=None):
"""Set the parser for the hub push
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from jina.parsers.hubble.push import mixin_hub_push_parser
mixin_hub_usage_parser(parser)
mixin_hub_push_parser(parser)
return parser
def set_hub_pull_parser(parser=None):
"""Set the parser for the hub pull
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from jina.parsers.hubble.pull import mixin_hub_pull_parser
mixin_hub_usage_parser(parser)
mixin_hub_pull_parser(parser)
return parser
def set_hub_new_parser(parser=None):
"""Set the parser for the hub new
:param parser: an optional existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
from jina.parsers.hubble.new import mixin_hub_new_parser
mixin_hub_new_parser(parser)
return parser
def set_hub_parser(parser=None):
"""Set the parser for the hub
    :param parser: the parser to configure
:return: the parser
"""
if not parser:
parser = set_base_parser()
spp = parser.add_subparsers(
dest='hub',
description='use `%(prog)-8s [sub-command] --help` '
'to get detailed information about each sub-command',
required=True,
)
set_hub_new_parser(
spp.add_parser(
'new',
help='create a new executor using the template',
description='Create a new executor using the template',
formatter_class=_chf,
)
)
set_hub_push_parser(
spp.add_parser(
'push',
help='push an executor package to Jina hub',
description='Push an executor package to Jina hub',
formatter_class=_chf,
)
)
set_hub_pull_parser(
spp.add_parser(
'pull',
help='download an executor image/package from Jina hub',
description='Download an executor image/package from Jina hub',
formatter_class=_chf,
)
)
return parser
|
"""Test openai embeddings."""
import numpy as np
import pytest
from langchain_community.embeddings.openai import OpenAIEmbeddings
@pytest.mark.scheduled
def test_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536
@pytest.mark.scheduled
def test_openai_embedding_documents_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = OpenAIEmbeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
async def test_openai_embedding_documents_async_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = OpenAIEmbeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = await embedding.aembed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
def test_openai_embedding_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = OpenAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
@pytest.mark.scheduled
async def test_openai_embedding_async_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = OpenAIEmbeddings()
output = await embedding.aembed_query(document)
assert len(output) == 1536
@pytest.mark.skip(reason="Unblock scheduled testing. TODO: fix.")
@pytest.mark.scheduled
def test_openai_embedding_with_empty_string() -> None:
"""Test openai embeddings with empty string."""
import openai
document = ["", "abc"]
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(document)
assert len(output) == 2
assert len(output[0]) == 1536
expected_output = openai.Embedding.create(input="", model="text-embedding-ada-002")[
"data"
][0]["embedding"]
assert np.allclose(output[0], expected_output)
assert len(output[1]) == 1536
@pytest.mark.scheduled
def test_embed_documents_normalized() -> None:
output = OpenAIEmbeddings().embed_documents(["foo walked to the market"])
assert np.isclose(np.linalg.norm(output[0]), 1.0)
@pytest.mark.scheduled
def test_embed_query_normalized() -> None:
output = OpenAIEmbeddings().embed_query("foo walked to the market")
assert np.isclose(np.linalg.norm(output), 1.0)
|
"""Test openai embeddings."""
import numpy as np
import pytest
from langchain_community.embeddings.openai import OpenAIEmbeddings
@pytest.mark.scheduled
def test_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536
@pytest.mark.scheduled
def test_openai_embedding_documents_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = OpenAIEmbeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
async def test_openai_embedding_documents_async_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = OpenAIEmbeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = await embedding.aembed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
def test_openai_embedding_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = OpenAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
@pytest.mark.scheduled
async def test_openai_embedding_async_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = OpenAIEmbeddings()
output = await embedding.aembed_query(document)
assert len(output) == 1536
@pytest.mark.skip(reason="Unblock scheduled testing. TODO: fix.")
@pytest.mark.scheduled
def test_openai_embedding_with_empty_string() -> None:
"""Test openai embeddings with empty string."""
import openai
document = ["", "abc"]
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(document)
assert len(output) == 2
assert len(output[0]) == 1536
expected_output = openai.Embedding.create(input="", model="text-embedding-ada-002")[ # type: ignore[attr-defined]
"data"
][0]["embedding"]
assert np.allclose(output[0], expected_output)
assert len(output[1]) == 1536
@pytest.mark.scheduled
def test_embed_documents_normalized() -> None:
output = OpenAIEmbeddings().embed_documents(["foo walked to the market"])
assert np.isclose(np.linalg.norm(output[0]), 1.0)
@pytest.mark.scheduled
def test_embed_query_normalized() -> None:
output = OpenAIEmbeddings().embed_query("foo walked to the market")
assert np.isclose(np.linalg.norm(output), 1.0)
|
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: need to comply TorchScript syntax -- need annotation and no f-string nor global
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
buffer_size: int = 4096,
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: need to comply TorchScript syntax -- need annotation and no f-string nor global
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
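# Hedged worked examples of the filter strings produced by _get_load_filter above
# (the numeric values are illustrative; the expected strings follow directly from the function body).
assert _get_load_filter(0, -1, False) is None
assert _get_load_filter(0, -1, True) == "aformat=sample_fmts=fltp"
assert _get_load_filter(100, 50, True) == "atrim=start_sample=100:end_sample=150,aformat=sample_fmts=fltp"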
|
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: need to comply TorchScript syntax -- need annotation and no f-string nor global
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: need to comply TorchScript syntax -- need annotation and no f-string nor global
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Anchor Sparsity: Active Dimensions: 105.5, Sparsity Ratio: 0.9965
Model Positive Sparsity: Active Dimensions: 69.8, Sparsity Ratio: 0.9977
Model Negative Sparsity: Active Dimensions: 68.6, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model, ".")
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Sparsity Stats Query : Row Non-Zero Mean: 105.4530029296875, Row Sparsity Mean: 0.9965449571609497
Model Sparsity Stats Corpus : Row Non-Zero Mean: 69.18349838256836, Row Sparsity Mean: 0.9977333247661591
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import numpy as np
from docarray.array import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import NdArray
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da._get_documents_attribute('tensor')
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da._get_documents_attribute('text')
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da.tensor
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da.text
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes_document():
class InnerDoc(BaseDocument):
text: str
class Mmdoc(BaseDocument):
inner: InnerDoc
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(inner=InnerDoc(text=f'hello{i}')) for i in range(N))
)
assert isinstance(da.inner, DocumentArray)
|
import numpy as np
from docarray.array import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: Tensor
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da._get_documents_attribute('tensor')
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da._get_documents_attribute('text')
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes():
class Mmdoc(BaseDocument):
text: str
tensor: Tensor
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da.tensor
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da.text
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes_document():
class InnerDoc(BaseDocument):
text: str
class Mmdoc(BaseDocument):
inner: InnerDoc
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(inner=InnerDoc(text=f'hello{i}')) for i in range(N))
)
assert isinstance(da.inner, DocumentArray)
|
from abc import abstractmethod
import pytest
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.base import BaseStandardTests
class RetrieversIntegrationTests(BaseStandardTests):
"""Base class for retrievers integration tests."""
@property
@abstractmethod
def retriever_constructor(self) -> type[BaseRetriever]:
"""A BaseRetriever subclass to be tested."""
...
@property
def retriever_constructor_params(self) -> dict:
"""Returns a dictionary of parameters to pass to the retriever constructor."""
return {}
@property
@abstractmethod
def retriever_query_example(self) -> str:
"""Returns a str representing the "query" of an example retriever call."""
...
@pytest.fixture
def retriever(self) -> BaseRetriever:
""":private:"""
return self.retriever_constructor(**self.retriever_constructor_params)
def test_k_constructor_param(self) -> None:
"""Test that the retriever constructor accepts a k parameter, representing
the number of documents to return.
.. dropdown:: Troubleshooting
If this test fails, either the retriever constructor does not accept a k
parameter, or the retriever does not return the correct number of documents
(`k`) when it is set.
For example, a retriever like
.. code-block:: python
MyRetriever(k=3).invoke("query")
should return 3 documents when invoked with a query.
"""
params = {
k: v for k, v in self.retriever_constructor_params.items() if k != "k"
}
params_3 = {**params, "k": 3}
retriever_3 = self.retriever_constructor(**params_3)
result_3 = retriever_3.invoke(self.retriever_query_example)
assert len(result_3) == 3
assert all(isinstance(doc, Document) for doc in result_3)
params_1 = {**params, "k": 1}
retriever_1 = self.retriever_constructor(**params_1)
result_1 = retriever_1.invoke(self.retriever_query_example)
assert len(result_1) == 1
assert all(isinstance(doc, Document) for doc in result_1)
def test_invoke_with_k_kwarg(self, retriever: BaseRetriever) -> None:
"""Test that the invoke method accepts a k parameter, representing the number of
documents to return.
.. dropdown:: Troubleshooting
If this test fails, the retriever's invoke method does not accept a k
parameter, or the retriever does not return the correct number of documents
(`k`) when it is set.
For example, a retriever like
.. code-block:: python
MyRetriever().invoke("query", k=3)
should return 3 documents when invoked with a query.
"""
result_1 = retriever.invoke(self.retriever_query_example, k=1)
assert len(result_1) == 1
assert all(isinstance(doc, Document) for doc in result_1)
result_3 = retriever.invoke(self.retriever_query_example, k=3)
assert len(result_3) == 3
assert all(isinstance(doc, Document) for doc in result_3)
def test_invoke_returns_documents(self, retriever: BaseRetriever) -> None:
"""If invoked with the example params, the retriever should return a list of
Documents.
.. dropdown:: Troubleshooting
If this test fails, the retriever's invoke method does not return a list of
`langchain_core.document.Document` objects. Please confirm that your
`_get_relevant_documents` method returns a list of `Document` objects.
"""
result = retriever.invoke(self.retriever_query_example)
assert isinstance(result, list)
assert all(isinstance(doc, Document) for doc in result)
async def test_ainvoke_returns_documents(self, retriever: BaseRetriever) -> None:
"""If ainvoked with the example params, the retriever should return a list of
Documents.
See :meth:`test_invoke_returns_documents` for more information on
troubleshooting.
"""
result = await retriever.ainvoke(self.retriever_query_example)
assert isinstance(result, list)
assert all(isinstance(doc, Document) for doc in result)
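# Hedged sketch of a concrete subclass of the integration test suite above.
# "ParrotRetriever" is a made-up placeholder for the retriever under test, not a real LangChain class.
class TestParrotRetriever(RetrieversIntegrationTests):
    @property
    def retriever_constructor(self) -> type[BaseRetriever]:
        return ParrotRetriever  # hypothetical BaseRetriever subclass being tested
    @property
    def retriever_constructor_params(self) -> dict:
        return {"k": 2}
    @property
    def retriever_query_example(self) -> str:
        return "what does the parrot say?"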
|
from abc import abstractmethod
import pytest
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.base import BaseStandardTests
class RetrieversIntegrationTests(BaseStandardTests):
"""
Base class for retrievers integration tests.
"""
@property
@abstractmethod
def retriever_constructor(self) -> type[BaseRetriever]:
"""
A BaseRetriever subclass to be tested.
"""
...
@property
def retriever_constructor_params(self) -> dict:
"""
Returns a dictionary of parameters to pass to the retriever constructor.
"""
return {}
@property
@abstractmethod
def retriever_query_example(self) -> str:
"""
Returns a str representing the "query" of an example retriever call.
"""
...
@pytest.fixture
def retriever(self) -> BaseRetriever:
"""
:private:
"""
return self.retriever_constructor(**self.retriever_constructor_params)
def test_k_constructor_param(self) -> None:
"""
Test that the retriever constructor accepts a k parameter, representing
the number of documents to return.
.. dropdown:: Troubleshooting
If this test fails, either the retriever constructor does not accept a k
parameter, or the retriever does not return the correct number of documents
(`k`) when it is set.
For example, a retriever like
.. code-block:: python
MyRetriever(k=3).invoke("query")
should return 3 documents when invoked with a query.
"""
params = {
k: v for k, v in self.retriever_constructor_params.items() if k != "k"
}
params_3 = {**params, "k": 3}
retriever_3 = self.retriever_constructor(**params_3)
result_3 = retriever_3.invoke(self.retriever_query_example)
assert len(result_3) == 3
assert all(isinstance(doc, Document) for doc in result_3)
params_1 = {**params, "k": 1}
retriever_1 = self.retriever_constructor(**params_1)
result_1 = retriever_1.invoke(self.retriever_query_example)
assert len(result_1) == 1
assert all(isinstance(doc, Document) for doc in result_1)
def test_invoke_with_k_kwarg(self, retriever: BaseRetriever) -> None:
"""
Test that the invoke method accepts a k parameter, representing the number of
documents to return.
.. dropdown:: Troubleshooting
If this test fails, the retriever's invoke method does not accept a k
parameter, or the retriever does not return the correct number of documents
(`k`) when it is set.
For example, a retriever like
.. code-block:: python
MyRetriever().invoke("query", k=3)
should return 3 documents when invoked with a query.
"""
result_1 = retriever.invoke(self.retriever_query_example, k=1)
assert len(result_1) == 1
assert all(isinstance(doc, Document) for doc in result_1)
result_3 = retriever.invoke(self.retriever_query_example, k=3)
assert len(result_3) == 3
assert all(isinstance(doc, Document) for doc in result_3)
def test_invoke_returns_documents(self, retriever: BaseRetriever) -> None:
"""
If invoked with the example params, the retriever should return a list of
Documents.
.. dropdown:: Troubleshooting
If this test fails, the retriever's invoke method does not return a list of
`langchain_core.document.Document` objects. Please confirm that your
`_get_relevant_documents` method returns a list of `Document` objects.
"""
result = retriever.invoke(self.retriever_query_example)
assert isinstance(result, list)
assert all(isinstance(doc, Document) for doc in result)
async def test_ainvoke_returns_documents(self, retriever: BaseRetriever) -> None:
"""
If ainvoked with the example params, the retriever should return a list of
Documents.
See :meth:`test_invoke_returns_documents` for more information on
troubleshooting.
"""
result = await retriever.ainvoke(self.retriever_query_example)
assert isinstance(result, list)
assert all(isinstance(doc, Document) for doc in result)
|
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocumentArray[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocumentArray[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes_',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
|
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocumentArray[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocumentArray[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized for sending over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized for sending over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from urllib.parse import quote
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class FactCheckerBlock(Block):
class Input(BlockSchema):
statement: str = SchemaField(
description="The statement to check for factuality"
)
credentials: JinaCredentialsInput = JinaCredentialsField()
class Output(BlockSchema):
factuality: float = SchemaField(
description="The factuality score of the statement"
)
result: bool = SchemaField(description="The result of the factuality check")
reason: str = SchemaField(description="The reason for the factuality result")
error: str = SchemaField(description="Error message if the check fails")
def __init__(self):
super().__init__(
id="d38b6c5e-9968-4271-8423-6cfe60d6e7e6",
description="This block checks the factuality of a given statement using Jina AI's Grounding API.",
categories={BlockCategory.SEARCH},
input_schema=FactCheckerBlock.Input,
output_schema=FactCheckerBlock.Output,
)
async def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
encoded_statement = quote(input_data.statement)
url = f"https://g.jina.ai/{encoded_statement}"
headers = {
"Accept": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
response = await Requests().get(url, headers=headers)
data = response.json()
if "data" in data:
data = data["data"]
yield "factuality", data["factuality"]
yield "result", data["result"]
yield "reason", data["reason"]
else:
raise RuntimeError(f"Expected 'data' key not found in response: {data}")
|
from urllib.parse import quote
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class FactCheckerBlock(Block):
class Input(BlockSchema):
statement: str = SchemaField(
description="The statement to check for factuality"
)
credentials: JinaCredentialsInput = JinaCredentialsField()
class Output(BlockSchema):
factuality: float = SchemaField(
description="The factuality score of the statement"
)
result: bool = SchemaField(description="The result of the factuality check")
reason: str = SchemaField(description="The reason for the factuality result")
error: str = SchemaField(description="Error message if the check fails")
def __init__(self):
super().__init__(
id="d38b6c5e-9968-4271-8423-6cfe60d6e7e6",
description="This block checks the factuality of a given statement using Jina AI's Grounding API.",
categories={BlockCategory.SEARCH},
input_schema=FactCheckerBlock.Input,
output_schema=FactCheckerBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
encoded_statement = quote(input_data.statement)
url = f"https://g.jina.ai/{encoded_statement}"
headers = {
"Accept": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
response = requests.get(url, headers=headers)
response.raise_for_status()
data = response.json()
if "data" in data:
data = data["data"]
yield "factuality", data["factuality"]
yield "result", data["result"]
yield "reason", data["reason"]
else:
raise RuntimeError(f"Expected 'data' key not found in response: {data}")
|
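For reference, the same Grounding API request can be sketched with plain synchronous requests outside the block framework; the endpoint, headers, and response fields are taken from the block above, while the helper name and key handling are only illustrative.

from urllib.parse import quote

import requests


def check_statement(statement: str, api_key: str) -> dict:
    """Query Jina's Grounding API and return its 'data' payload."""
    response = requests.get(
        f"https://g.jina.ai/{quote(statement)}",
        headers={
            "Accept": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        timeout=30,
    )
    response.raise_for_status()
    return response.json().get("data", {})


# Hypothetical usage:
# data = check_statement("The Eiffel Tower is in Berlin.", "<your-jina-api-key>")
# print(data.get("factuality"), data.get("result"), data.get("reason"))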
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(2, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`.
The resulting output when using the `"valid"` padding option has a shape of:
    `output_shape = (input_shape - pool_size + 1) / strides`.
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
Args:
pool_size: int, size of the max pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="valid")
>>> max_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=2, padding="valid")
>>> max_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="same")
>>> max_pool_1d(x)
"""
def __init__(
self,
pool_size=2,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`.
The resulting output when using the `"valid"` padding option has a shape of:
    `output_shape = (input_shape - pool_size + 1) / strides`.
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
Args:
pool_size: int, size of the max pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="valid")
>>> max_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=2, padding="valid")
>>> max_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="same")
>>> max_pool_1d(x)
"""
def __init__(
self,
pool_size=2,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
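A quick numerical check of the docstring's shape formulas, assuming a standard Keras 3 install; the array values are arbitrary.

import numpy as np
import keras

x = np.arange(1.0, 6.0).reshape(1, 5, 1)  # (batch, steps, features)

valid = keras.layers.MaxPooling1D(pool_size=2, strides=1, padding="valid")(x)
same = keras.layers.MaxPooling1D(pool_size=2, strides=1, padding="same")(x)

# "valid": (5 - 2 + 1) / 1 = 4 steps; "same": 5 / 1 = 5 steps
print(valid.shape)  # (1, 4, 1)
print(same.shape)   # (1, 5, 1)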
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses import (
FlopsLoss,
SparseDistillKLDivLoss,
SparseMarginMSELoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class PrincipalLoss(Enum):
"""The principal loss types for the model"""
MMSE = SparseMarginMSELoss
KL = SparseDistillKLDivLoss
MRL = SparseMultipleNegativesRankingLoss
class SpladeLoss(nn.Module):
def __init__(
self, model: SparseEncoder, main_loss: PrincipalLoss, lambda_corpus: float = 0.1, lambda_query: float = 0.1
):
super().__init__()
self.model = model
self.lambda_corpus = lambda_corpus
self.lambda_query = lambda_query
self.main_loss = main_loss
self.flops_loss = FlopsLoss(model)
def forward(
self, sentence_features: Iterable[dict[str, torch.Tensor]], labels: torch.Tensor = None
    ) -> torch.Tensor:
# Compute embeddings using the model
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
main_loss_value = self.main_loss.compute_loss_from_embeddings(embeddings, labels)
flops_query = self.flops_loss.compute_loss_from_embeddings(embeddings, "query")
flops_corpus = self.flops_loss.compute_loss_from_embeddings(embeddings, "corpus")
# Compute the total loss
total_loss = main_loss_value + self.lambda_query * flops_query + self.lambda_corpus * flops_corpus
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"lambda_corpus": self.lambda_corpus, "lambda_query": self.lambda_query, "main_loss": self.main_loss}
@property
def citation(self) -> str:
return """
@misc{wen2025matryoshkarevisitingsparsecoding,
title={Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation},
author={Tiansheng Wen and Yifei Wang and Zequn Zeng and Zhong Peng and Yudi Su and Xinyang Liu and Bo Chen and Hongwei Liu and Stefanie Jegelka and Chenyu You},
year={2025},
eprint={2503.01776},
archivePrefix={arXiv},
primaryClass={cs.LG},
url={https://arxiv.org/abs/2503.01776},
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses import (
FlopsLoss,
SparseDistillKLDivLoss,
SparseMarginMSELoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class PrincipalLoss(Enum):
"""The principal loss types for the model"""
MMSE = SparseMarginMSELoss
KL = SparseDistillKLDivLoss
MRL = SparseMultipleNegativesRankingLoss
class SpladeLoss(nn.Module):
def __init__(
        self, model: SparseEncoder, main_loss: PrincipalLoss, lambda_corpus: float = 0.1, lambda_query: float = 0.1
):
super().__init__()
self.model = model
        self.lambda_corpus = lambda_corpus
        self.lambda_query = lambda_query
self.main_loss = main_loss
self.flops_loss = FlopsLoss(model)
def forward(
self, sentence_features: Iterable[dict[str, torch.Tensor]], labels: torch.Tensor = None
    ) -> torch.Tensor:
# Compute embeddings using the model
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
main_loss_value = self.main_loss.compute_loss_from_embeddings(embeddings, labels)
flops_query = self.flops_loss.compute_loss_from_embeddings(embeddings, "query")
flops_corpus = self.flops_loss.compute_loss_from_embeddings(embeddings, "corpus")
# Compute the total loss
        total_loss = main_loss_value + self.lambda_query * flops_query + self.lambda_corpus * flops_corpus
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"lamda_corpus": self.lamda_corpus, "lamda_query": self.lamda_query, "main_loss": self.main_loss}
@property
def citation(self) -> str:
return """
@misc{wen2025matryoshkarevisitingsparsecoding,
title={Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation},
author={Tiansheng Wen and Yifei Wang and Zequn Zeng and Zhong Peng and Yudi Su and Xinyang Liu and Bo Chen and Hongwei Liu and Stefanie Jegelka and Chenyu You},
year={2025},
eprint={2503.01776},
archivePrefix={arXiv},
primaryClass={cs.LG},
url={https://arxiv.org/abs/2503.01776},
}
"""
|
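The objective assembled in forward() above is just the main ranking or distillation loss plus per-side FLOPS penalties; a toy numeric sketch of that weighted sum (all values are made up for illustration):

import torch

main_loss_value = torch.tensor(0.85)   # e.g. a MarginMSE or ranking loss
flops_query = torch.tensor(12.0)       # FLOPS regularizer on query embeddings
flops_corpus = torch.tensor(40.0)      # FLOPS regularizer on corpus embeddings

lambda_query, lambda_corpus = 5e-5, 3e-5
total = main_loss_value + lambda_query * flops_query + lambda_corpus * flops_corpus
print(float(total))  # 0.85 + 0.0006 + 0.0012 = 0.8518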
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.6.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.6.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
This loss function works great to train embeddings for retrieval setups where you have positive pairs
(e.g. (query, answer)) as it will sample in each batch ``n-1`` negative docs randomly.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), corpus_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
This loss function works great to train embeddings for retrieval setups where you have positive pairs
(e.g. (query, answer)) as it will sample in each batch ``n-1`` negative docs randomly.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), lambda_corpus=3e-5, lambda_query=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
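As the docstring explains, the in-batch negatives objective reduces to a cross-entropy over a similarity matrix whose diagonal holds the true (anchor, positive) pairs; a minimal dense sketch of that idea (not the sparse implementation itself):

import torch
import torch.nn.functional as F

anchors = torch.randn(4, 8)    # batch of anchor embeddings
positives = torch.randn(4, 8)  # matching positive embeddings

scale = 1.0
scores = scale * anchors @ positives.T  # (4, 4) dot-product similarity matrix
labels = torch.arange(scores.size(0))   # the i-th positive belongs to the i-th anchor
loss = F.cross_entropy(scores, labels)  # every other entry in a row acts as an in-batch negative
print(loss)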
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .mot_error_visualize import imshow_mot_errors
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import (register_all_modules, setup_cache_size_limit_of_dynamo,
setup_multi_processes)
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg',
'setup_cache_size_limit_of_dynamo', 'imshow_mot_errors'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import (register_all_modules, setup_cache_size_limit_of_dynamo,
setup_multi_processes)
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg',
'setup_cache_size_limit_of_dynamo'
]
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from typing_extensions import override
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
@override
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
@override
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
@override
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
@override
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
@override
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
@override
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
|
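A small usage sketch for the handler; in a real application it is normally passed through a chain's callbacks, but here the callback methods are driven by hand so the example stays self-contained (the file name is illustrative).

handler = FileCallbackHandler("run.log")  # appends by default (mode="a")
handler.on_chain_start({"name": "demo_chain"}, {"question": "hi"})
handler.on_text("intermediate reasoning text", end="\n")
handler.on_chain_end({"answer": "hello"})
del handler  # __del__ closes the file, flushing the chain banners and text to run.log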
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2ndarray,
multi_apply, select_single_mlvl, unmap)
from .typing import (ConfigType, ForwardResults, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, OptSampleList, OptSamplingResultList,
PixelList, SampleList, SamplingResultList)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk', 'sync_random_seed', 'levels_to_images',
'ConfigType', 'OptConfigType', 'MultiConfig', 'OptMultiConfig',
'InstanceList', 'OptInstanceList', 'SampleList', 'OptSampleList',
'SamplingResultList', 'ForwardResults', 'OptSamplingResultList',
'PixelList', 'OptPixelList'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2ndarray,
multi_apply, select_single_mlvl, unmap)
from .typing import (ConfigType, ForwardResults, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptSampleList, OptSamplingResultList, SampleList,
SamplingResultList)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk', 'sync_random_seed', 'levels_to_images',
'ConfigType', 'OptConfigType', 'MultiConfig', 'OptMultiConfig',
'InstanceList', 'OptInstanceList', 'SampleList', 'OptSampleList',
'SamplingResultList', 'ForwardResults', 'OptSamplingResultList'
]
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
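A usage sketch following the constructor signature above; the checkpoint name and the two-document corpus are placeholders.

from sentence_transformers.sparse_encoder import SparseEncoder

queries = {"q1": "What is the capital of France?"}
corpus = {"d1": "Paris is the capital of France.", "d2": "Berlin is in Germany."}
relevant_docs = {"q1": {"d1"}}

evaluator = SparseInformationRetrievalEvaluator(
    queries=queries,
    corpus=corpus,
    relevant_docs=relevant_docs,
    name="toy-ir",
    batch_size=2,
)

model = SparseEncoder("naver/splade-cocondenser-ensembledistil")  # placeholder checkpoint
metrics = evaluator(model)
print(metrics)  # dict with accuracy@k, precision/recall@k, MRR@10, NDCG@10, MAP@100 entries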
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import AuraFlowTransformer2DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class AuraFlowTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = AuraFlowTransformer2DModel
main_input_name = "hidden_states"
# We override the items here because the transformer under consideration is small.
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = width = embedding_dim = 32
sequence_length = 256
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 32, 32)
@property
def output_shape(self):
return (4, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 32,
"patch_size": 2,
"in_channels": 4,
"num_mmdit_layers": 1,
"num_single_dit_layers": 1,
"attention_head_dim": 8,
"num_attention_heads": 4,
"caption_projection_dim": 32,
"joint_attention_dim": 32,
"out_channels": 4,
"pos_embed_max_size": 256,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"AuraFlowTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@unittest.skip("AuraFlowTransformer2DModel uses its own dedicated attention processor. This test does not apply")
def test_set_attn_processor_for_determinism(self):
pass
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import AuraFlowTransformer2DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class AuraFlowTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = AuraFlowTransformer2DModel
main_input_name = "hidden_states"
# We override the items here because the transformer under consideration is small.
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = width = embedding_dim = 32
sequence_length = 256
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 32, 32)
@property
def output_shape(self):
return (4, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 32,
"patch_size": 2,
"in_channels": 4,
"num_mmdit_layers": 1,
"num_single_dit_layers": 1,
"attention_head_dim": 8,
"num_attention_heads": 4,
"caption_projection_dim": 32,
"joint_attention_dim": 32,
"out_channels": 4,
"pos_embed_max_size": 256,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"AuraFlowTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@unittest.skip("AuraFlowTransformer2DModel uses its own dedicated attention processor. This test does not apply")
def test_set_attn_processor_for_determinism(self):
pass
|
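For orientation, here is a minimal sketch (not part of the test suite) that builds the same tiny AuraFlow transformer from the init args above and runs one forward pass on inputs shaped like dummy_input; the `.sample` attribute of the output is assumed from the usual diffusers output convention.
import torch
from diffusers import AuraFlowTransformer2DModel

# Same tiny configuration as prepare_init_args_and_inputs_for_common above.
model = AuraFlowTransformer2DModel(
    sample_size=32,
    patch_size=2,
    in_channels=4,
    num_mmdit_layers=1,
    num_single_dit_layers=1,
    attention_head_dim=8,
    num_attention_heads=4,
    caption_projection_dim=32,
    joint_attention_dim=32,
    out_channels=4,
    pos_embed_max_size=256,
)
hidden_states = torch.randn(2, 4, 32, 32)        # (batch, channels, height, width)
encoder_hidden_states = torch.randn(2, 256, 32)  # (batch, sequence_length, embedding_dim)
timestep = torch.randint(0, 1000, (2,))
with torch.no_grad():
    out = model(
        hidden_states=hidden_states,
        encoder_hidden_states=encoder_hidden_states,
        timestep=timestep,
    )
# Output shape mirrors output_shape above; the `.sample` attribute is assumed.
print(out.sample.shape)  # torch.Size([2, 4, 32, 32])
|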
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metrics model generator for mobilenet v1.
The produced model is to be used as part of the mini-benchmark, combined into
the same flatbuffer with the main model.
Mobilenet v1 is described in
https://tfhub.dev/tensorflow/coral-model/mobilenet_v1_1.0_224_quantized/1/default/1
The metrics used are symmetric KL-divergence and MSE.
"""
import argparse
import sys
# TODO(b/152872335): (re-)port to tf v2 after output names are kept during
# conversion in v2.
import tensorflow.compat.v1 as tf
from tensorflow.lite.experimental.acceleration.mini_benchmark.metrics import kl_divergence
from tensorflow.lite.python import lite
from tensorflow.lite.tools import flatbuffer_utils
parser = argparse.ArgumentParser(
description='Script to generate a metrics model for mobilenet v1.')
parser.add_argument('output', help='Output filepath')
def main(output_path):
tf.reset_default_graph()
with tf.Graph().as_default():
expected_scores = tf.placeholder(dtype=tf.float32, shape=[1, 1001])
actual_scores = tf.placeholder(dtype=tf.float32, shape=[1, 1001])
mse = tf.reshape(
tf.math.reduce_mean((expected_scores - actual_scores)**2), [1],
name='mse')
kld_metric = kl_divergence.symmetric_kl_divergence(expected_scores,
actual_scores)
kld_metric = tf.reshape(kld_metric, [1], name='symmetric_kl_divergence')
    # Thresholds chosen by comparing NNAPI top-k accuracy on MLTS on devices
    # with top-k accuracy within 1 %-point of TFLite CPU and with a 5 %-point drop.
ok = tf.reshape(
tf.logical_and(kld_metric < 5.5, mse < 0.003), [1], name='ok')
sess = tf.compat.v1.Session()
converter = lite.TFLiteConverter.from_session(sess, [
expected_scores,
actual_scores,
], [kld_metric, mse, ok])
converter.experimental_new_converter = True
tflite_model = converter.convert()
if sys.byteorder == 'big':
tflite_model = flatbuffer_utils.byte_swap_tflite_buffer(
tflite_model, 'big', 'little'
)
open(output_path, 'wb').write(tflite_model)
if __name__ == '__main__':
flags, unparsed = parser.parse_known_args()
if unparsed:
parser.print_usage()
sys.stderr.write('\nGot the following unparsed args, %r please fix.\n' %
unparsed)
exit(1)
else:
main(flags.output)
exit(0)
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metrics model generator for mobilenet v1.
The produced model is to be used as part of the mini-benchmark, combined into
the same flatbuffer with the main model.
Mobilenet v1 is described in
https://tfhub.dev/tensorflow/coral-model/mobilenet_v1_1.0_224_quantized/1/default/1
The metrics used are symmetric KL-divergence and MSE.
"""
import argparse
import sys
# TODO(b/152872335): (re-)port to tf v2 after output names are kept during
# conversion in v2.
import tensorflow.compat.v1 as tf
from tensorflow.lite.experimental.acceleration.mini_benchmark.metrics import kl_divergence
from tensorflow.lite.tools import flatbuffer_utils
parser = argparse.ArgumentParser(
description='Script to generate a metrics model for mobilenet v1.')
parser.add_argument('output', help='Output filepath')
def main(output_path):
tf.reset_default_graph()
with tf.Graph().as_default():
expected_scores = tf.placeholder(dtype=tf.float32, shape=[1, 1001])
actual_scores = tf.placeholder(dtype=tf.float32, shape=[1, 1001])
mse = tf.reshape(
tf.math.reduce_mean((expected_scores - actual_scores)**2), [1],
name='mse')
kld_metric = kl_divergence.symmetric_kl_divergence(expected_scores,
actual_scores)
kld_metric = tf.reshape(kld_metric, [1], name='symmetric_kl_divergence')
    # Thresholds chosen by comparing NNAPI top-k accuracy on MLTS on devices
    # with top-k accuracy within 1 %-point of TFLite CPU and with a 5 %-point drop.
ok = tf.reshape(
tf.logical_and(kld_metric < 5.5, mse < 0.003), [1], name='ok')
sess = tf.compat.v1.Session()
converter = tf.lite.TFLiteConverter.from_session(sess, [
expected_scores,
actual_scores,
], [kld_metric, mse, ok])
converter.experimental_new_converter = True
tflite_model = converter.convert()
if sys.byteorder == 'big':
tflite_model = flatbuffer_utils.byte_swap_tflite_buffer(
tflite_model, 'big', 'little'
)
open(output_path, 'wb').write(tflite_model)
if __name__ == '__main__':
flags, unparsed = parser.parse_known_args()
if unparsed:
parser.print_usage()
sys.stderr.write('\nGot the following unparsed args, %r please fix.\n' %
unparsed)
exit(1)
else:
main(flags.output)
exit(0)
|
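As a rough, framework-free illustration of the decision logic in the graph above (not a substitute for kl_divergence.symmetric_kl_divergence), here is a NumPy sketch that assumes the symmetric KL is the sum of both directed divergences.
import numpy as np

def symmetric_kl(p, q, eps=1e-10):
    # Assumed formulation: KL(p || q) + KL(q || p) over clipped probabilities.
    p = np.clip(p, eps, None)
    q = np.clip(q, eps, None)
    return float(np.sum(p * np.log(p / q)) + np.sum(q * np.log(q / p)))

def metrics_ok(expected_scores, actual_scores):
    # Same thresholds as the 'ok' tensor in the graph: kld < 5.5 and mse < 0.003.
    mse = float(np.mean((expected_scores - actual_scores) ** 2))
    return symmetric_kl(expected_scores, actual_scores) < 5.5 and mse < 0.003

expected = np.random.dirichlet(np.ones(1001)).reshape(1, 1001)
actual = np.clip(expected + np.random.normal(scale=1e-4, size=expected.shape), 0, None)
print(metrics_ok(expected, actual))  # True for a small perturbation
|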
# mypy: allow-untyped-defs
import warnings
import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers
class PostLocalSGDOptimizer(torch.optim.Optimizer):
r"""
    Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
    This optimizer runs the local optimizer at every step.
    After the warm-up stage, it averages parameters periodically after the local optimizer is applied.
Args:
optim: The local optimizer.
averager: A model averager instance to run post-localSGD algorithm.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>> from torch.distributed.optim import PostLocalSGDOptimizer
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>>
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>>
>>> # Register a post-localSGD communication hook.
>>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Create a post-localSGD optimizer that wraps a local optimizer.
>>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as
>>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01)
>>> opt = PostLocalSGDOptimizer(
>>> optim=local_optim,
>>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> )
>>>
>>> # In the first 100 steps, DDP runs global gradient averaging at every step.
>>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default),
>>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer.
>>> for step in range(0, 200):
>>> opt.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> opt.step()
"""
def __init__(self, optim: torch.optim.Optimizer, averager: averagers.ModelAverager):
self.optim = optim
self.param_groups = self.optim.param_groups
self.averager = averager
@property
def state(self): # type: ignore[override]
return self.optim.state
def __repr__(self):
return self.optim.__repr__()
def state_dict(self):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`,
but adds an extra entry to record model averager's step to the checkpoint
to ensure reload does not cause unnecessary warm up again.
"""
optim_state_dict = self.optim.state_dict()
optim_state_dict["step"] = self.averager.step
return optim_state_dict
def load_state_dict(self, state_dict):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`,
but also restores model averager's step value to the one
saved in the provided ``state_dict``.
If there is no ``"step"`` entry in ``state_dict``,
it will raise a warning and initialize the model averager's step to 0.
"""
self.optim.load_state_dict(state_dict)
if "step" in state_dict:
self.averager.step = state_dict["step"]
else:
warnings.warn(
"Loaded state dict does not contain a step counter for an averager. "
"Setting step counter to 0."
)
self.averager.step = 0
def step(self): # type: ignore[override]
r"""
Performs a single optimization step (parameter update).
"""
self.optim.step()
self.averager.average_parameters(params=self.param_groups)
def zero_grad(self, set_to_none: bool = True): # type: ignore[override]
self.optim.zero_grad(set_to_none=set_to_none)
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
|
# mypy: allow-untyped-defs
import warnings
import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers
class PostLocalSGDOptimizer(torch.optim.Optimizer):
r"""
    Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
    This optimizer runs the local optimizer at every step.
    After the warm-up stage, it averages parameters periodically after the local optimizer is applied.
Args:
optim: The local optimizer.
averager: A model averager instance to run post-localSGD algorithm.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>> from torch.distributed.optim import PostLocalSGDOptimizer
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>>
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>>
>>> # Register a post-localSGD communication hook.
>>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Create a post-localSGD optimizer that wraps a local optimizer.
>>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as
>>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01)
>>> opt = PostLocalSGDOptimizer(
>>> optim=local_optim,
>>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> )
>>>
>>> # In the first 100 steps, DDP runs global gradient averaging at every step.
>>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default),
>>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer.
>>> for step in range(0, 200):
>>> opt.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> opt.step()
"""
def __init__(self, optim: torch.optim.Optimizer, averager: averagers.ModelAverager):
self.optim = optim
self.param_groups = self.optim.param_groups
self.averager = averager
@property
def state(self): # type: ignore[override]
return self.optim.state
def __repr__(self):
return self.optim.__repr__()
def state_dict(self):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`,
but adds an extra entry to record model averager's step to the checkpoint
to ensure reload does not cause unnecessary warm up again.
"""
optim_state_dict = self.optim.state_dict()
optim_state_dict["step"] = self.averager.step
return optim_state_dict
def load_state_dict(self, state_dict):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`,
but also restores model averager's step value to the one
saved in the provided ``state_dict``.
If there is no ``"step"`` entry in ``state_dict``,
it will raise a warning and initialize the model averager's step to 0.
"""
self.optim.load_state_dict(state_dict)
if "step" in state_dict:
self.averager.step = state_dict["step"]
else:
warnings.warn(
"Loaded state dict does not contain a step counter for an averager. "
"Setting step counter to 0."
)
self.averager.step = 0
def step(self): # type: ignore[override]
r"""
Performs a single optimization step (parameter update).
"""
self.optim.step()
self.averager.average_parameters(params=self.param_groups)
def zero_grad(self, set_to_none: bool = True): # type: ignore[override]
self.optim.zero_grad(set_to_none=set_to_none)
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
|
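A hedged sketch of the checkpointing behaviour documented above: state_dict carries the averager's step, and load_state_dict restores it or warns and resets it to 0 when the entry is missing. A single-process gloo group is assumed only so the averager can be constructed.
import torch
import torch.distributed as dist
import torch.distributed.algorithms.model_averaging.averagers as averagers
from torch.distributed.optim import PostLocalSGDOptimizer

# Single-process group, just enough for PeriodicModelAverager to initialize.
dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1)

model = torch.nn.Linear(4, 2)
local_optim = torch.optim.SGD(model.parameters(), lr=0.01)
opt = PostLocalSGDOptimizer(
    optim=local_optim,
    averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100),
)

ckpt = opt.state_dict()
assert "step" in ckpt               # the averager's step is recorded

opt.load_state_dict(ckpt)           # restores averager.step from the checkpoint
opt.load_state_dict(local_optim.state_dict())  # no "step" entry: warns, resets to 0
assert opt.averager.step == 0

dist.destroy_process_group()
|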
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
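For reference, a hedged sketch of how this config resolves through its _base_ files with mmengine; the file path is illustrative and depends on where the config lives in the repo.
from mmengine.config import Config

# Illustrative path; loading resolves _base_ inheritance and {{_base_.file_client_args}}.
cfg = Config.fromfile('configs/mask_rcnn/mask-rcnn_r50-fpn_lsj-100e_coco.py')
print(cfg.model.backbone.frozen_stages)  # -1, as overridden above
print(cfg.train_pipeline[2]['type'])     # 'RandomResize'
|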
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseMSEEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# The SparseMSEEvaluator computes the mean squared error between the student and teacher embeddings for the same sentences.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseMSEEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# The SparseMSEEvaluator computes the mean squared error between the student and teacher embeddings for the same sentences.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
print(mse_evaluator.primary_metric)
print(results[mse_evaluator.primary_metric])
|
# coding: utf-8
"""Get the most recent status of workflow for the current PR.
[usage]
python get_workflow_status.py TRIGGER_PHRASE
TRIGGER_PHRASE: Code phrase that triggers workflow.
"""
import json
from os import environ
from sys import argv, exit
from time import sleep
from urllib import request
def get_runs(trigger_phrase):
"""Get all triggering workflow comments in the current PR.
Parameters
----------
trigger_phrase : str
Code phrase that triggers workflow.
Returns
-------
pr_runs : list
List of comment objects sorted by the time of creation in decreasing order.
"""
pr_runs = []
if environ.get("GITHUB_EVENT_NAME", "") == "pull_request":
pr_number = int(environ.get("GITHUB_REF").split("/")[-2])
page = 1
while True:
req = request.Request(
url="{}/repos/microsoft/LightGBM/issues/{}/comments?page={}&per_page=100".format(
environ.get("GITHUB_API_URL"), pr_number, page
),
headers={"Accept": "application/vnd.github.v3+json"},
)
url = request.urlopen(req)
data = json.loads(url.read().decode("utf-8"))
url.close()
if not data:
break
runs_on_page = [
i
for i in data
if i["author_association"].lower() in {"owner", "member", "collaborator"}
and i["body"].startswith("/gha run {}".format(trigger_phrase))
]
pr_runs.extend(runs_on_page)
page += 1
return pr_runs[::-1]
def get_status(runs):
"""Get the most recent status of workflow for the current PR.
Parameters
----------
runs : list
List of comment objects sorted by the time of creation in decreasing order.
Returns
-------
status : str
The most recent status of workflow.
Can be 'success', 'failure' or 'in-progress'.
"""
status = "success"
for run in runs:
body = run["body"]
if "Status: " in body:
if "Status: skipped" in body:
continue
if "Status: failure" in body:
status = "failure"
break
if "Status: success" in body:
status = "success"
break
else:
status = "in-progress"
break
return status
if __name__ == "__main__":
trigger_phrase = argv[1]
while True:
status = get_status(get_runs(trigger_phrase))
if status != "in-progress":
break
sleep(60)
if status == "failure":
exit(1)
|
# coding: utf-8
"""Get the most recent status of workflow for the current PR.
[usage]
python get_workflow_status.py TRIGGER_PHRASE
TRIGGER_PHRASE: Code phrase that triggers workflow.
"""
import json
from os import environ
from sys import argv, exit
from time import sleep
from urllib import request
def get_runs(trigger_phrase):
"""Get all triggering workflow comments in the current PR.
Parameters
----------
trigger_phrase : str
Code phrase that triggers workflow.
Returns
-------
pr_runs : list
List of comment objects sorted by the time of creation in decreasing order.
"""
pr_runs = []
if environ.get("GITHUB_EVENT_NAME", "") == "pull_request":
pr_number = int(environ.get("GITHUB_REF").split("/")[-2])
page = 1
while True:
req = request.Request(
url="{}/repos/microsoft/LightGBM/issues/{}/comments?page={}&per_page=100".format(
environ.get("GITHUB_API_URL"), pr_number, page
),
headers={"Accept": "application/vnd.github.v3+json"},
)
url = request.urlopen(req)
data = json.loads(url.read().decode("utf-8"))
url.close()
if not data:
break
runs_on_page = [
i
for i in data
if i["author_association"].lower() in {"owner", "member", "collaborator"}
and i["body"].startswith("/gha run {}".format(trigger_phrase))
]
pr_runs.extend(runs_on_page)
page += 1
return pr_runs[::-1]
def get_status(runs):
"""Get the most recent status of workflow for the current PR.
Parameters
----------
runs : list
List of comment objects sorted by the time of creation in decreasing order.
Returns
-------
status : str
The most recent status of workflow.
Can be 'success', 'failure' or 'in-progress'.
"""
status = "success"
for run in runs:
body = run["body"]
if "Status: " in body:
if "Status: skipped" in body:
continue
if "Status: failure" in body:
status = "failure"
break
if "Status: success" in body:
status = "success"
break
else:
status = "in-progress"
break
return status
if __name__ == "__main__":
trigger_phrase = argv[1]
while True:
status = get_status(get_runs(trigger_phrase))
if status != "in-progress":
break
sleep(60)
if status == "failure":
exit(1)
|
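A quick hedged sketch of how get_status above reads a list of comment objects; only the "body" field matters, and the dict contents here are illustrative.
# Assumes get_status from the script above is in scope.
in_progress = [{"body": "/gha run test-r Workflow started"}]
failed = [{"body": "/gha run test-r\nStatus: failure."}]
succeeded = [{"body": "/gha run test-r\nStatus: success."}]

print(get_status(in_progress))  # 'in-progress' (no "Status: " line yet)
print(get_status(failed))       # 'failure'
print(get_status(succeeded))    # 'success'
|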
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class MyDoc(BaseDoc):
tens: NdArray
def test_configure_dim(tmp_path):
class Schema(BaseDoc):
tens: NdArray = Field(dim=10)
index = HnswDocumentIndex[Schema](work_dir=str(tmp_path))
assert index._hnsw_indices['tens'].dim == 10
docs = [Schema(tens=np.random.random((10,))) for _ in range(10)]
index.index(docs)
assert index.num_docs() == 10
def test_configure_index(tmp_path):
class Schema(BaseDoc):
tens: NdArray[100] = Field(max_elements=12, space='cosine')
tens_two: NdArray[10] = Field(M=4, space='ip')
index = HnswDocumentIndex[Schema](work_dir=str(tmp_path))
assert index._hnsw_indices['tens'].max_elements == 12
assert index._hnsw_indices['tens'].space == 'cosine'
assert index._hnsw_indices['tens'].M == 16 # default
assert index._hnsw_indices['tens'].dim == 100
assert index._hnsw_indices['tens_two'].max_elements == 1024 # default
assert index._hnsw_indices['tens_two'].space == 'ip'
assert index._hnsw_indices['tens_two'].M == 4
assert index._hnsw_indices['tens_two'].dim == 10
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class MyDoc(BaseDoc):
tens: NdArray
def test_configure_dim(tmp_path):
class Schema(BaseDoc):
tens: NdArray = Field(dim=10)
index = HnswDocumentIndex[Schema](work_dir=str(tmp_path))
assert index._hnsw_indices['tens'].dim == 10
docs = [Schema(tens=np.random.random((10,))) for _ in range(10)]
index.index(docs)
assert index.num_docs() == 10
|
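For completeness, a hedged sketch of querying an index configured as in test_configure_dim above; the find call and its return shape follow the docarray document-index API and may differ slightly by version.
import numpy as np
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray

class Schema(BaseDoc):
    tens: NdArray = Field(dim=10)

# Build a small index, then run an approximate nearest-neighbour query.
index = HnswDocumentIndex[Schema](work_dir='/tmp/hnsw_demo')
index.index([Schema(tens=np.random.random((10,))) for _ in range(10)])

query = np.random.random((10,))
matches, scores = index.find(query, search_field='tens', limit=3)
print(len(matches), scores)
|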
from enum import Enum
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import pre_init
class EnumOutputParser(BaseOutputParser[Enum]):
"""Parse an output that is one of a set of values."""
enum: type[Enum]
"""The enum to parse. Its values must be strings."""
@pre_init
def raise_deprecation(cls, values: dict) -> dict:
enum = values["enum"]
if not all(isinstance(e.value, str) for e in enum):
raise ValueError("Enum values must be strings")
return values
@property
def _valid_values(self) -> list[str]:
return [e.value for e in self.enum]
def parse(self, response: str) -> Enum:
try:
return self.enum(response.strip())
except ValueError:
raise OutputParserException(
f"Response '{response}' is not one of the "
f"expected values: {self._valid_values}"
)
def get_format_instructions(self) -> str:
return f"Select one of the following options: {', '.join(self._valid_values)}"
@property
def OutputType(self) -> type[Enum]:
return self.enum
|
from enum import Enum
from typing import Dict, List, Type
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import pre_init
class EnumOutputParser(BaseOutputParser[Enum]):
"""Parse an output that is one of a set of values."""
enum: Type[Enum]
"""The enum to parse. Its values must be strings."""
@pre_init
def raise_deprecation(cls, values: Dict) -> Dict:
enum = values["enum"]
if not all(isinstance(e.value, str) for e in enum):
raise ValueError("Enum values must be strings")
return values
@property
def _valid_values(self) -> List[str]:
return [e.value for e in self.enum]
def parse(self, response: str) -> Enum:
try:
return self.enum(response.strip())
except ValueError:
raise OutputParserException(
f"Response '{response}' is not one of the "
f"expected values: {self._valid_values}"
)
def get_format_instructions(self) -> str:
return f"Select one of the following options: {', '.join(self._valid_values)}"
@property
def OutputType(self) -> Type[Enum]:
return self.enum
|
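A brief usage sketch of the parser above with an illustrative string-valued enum; it assumes EnumOutputParser as defined here is in scope.
from enum import Enum

class Color(Enum):
    RED = "red"
    GREEN = "green"
    BLUE = "blue"

parser = EnumOutputParser(enum=Color)
print(parser.get_format_instructions())  # Select one of the following options: red, green, blue
print(parser.parse("  green\n"))         # Color.GREEN
|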
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
def uniform_temporal_subsample(inpt: datapoints._VideoTypeJIT, num_samples: int) -> datapoints._VideoTypeJIT:
if not torch.jit.is_scripting():
_log_api_usage_once(uniform_temporal_subsample)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return uniform_temporal_subsample_video(inpt, num_samples)
elif isinstance(inpt, datapoints.Video):
output = uniform_temporal_subsample_video(inpt.as_subclass(torch.Tensor), num_samples)
return datapoints.Video.wrap_like(inpt, output)
else:
raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
|
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
def uniform_temporal_subsample(inpt: datapoints.VideoTypeJIT, num_samples: int) -> datapoints.VideoTypeJIT:
if not torch.jit.is_scripting():
_log_api_usage_once(uniform_temporal_subsample)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return uniform_temporal_subsample_video(inpt, num_samples)
elif isinstance(inpt, datapoints.Video):
output = uniform_temporal_subsample_video(inpt.as_subclass(torch.Tensor), num_samples)
return datapoints.Video.wrap_like(inpt, output)
else:
raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
|
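A small usage sketch of the helper above on a plain video tensor; the temporal axis is the fourth-from-last dimension, as in the reference implementation.
import torch

# Assumes uniform_temporal_subsample from the module above is in scope.
video = torch.randn(16, 3, 8, 8)   # (T, C, H, W); the temporal axis is dim -4
clip = uniform_temporal_subsample(video, num_samples=4)
print(clip.shape)  # torch.Size([4, 3, 8, 8]) -- frames 0, 5, 10, 15
|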
from llama_index.core.base.llms.types import (
LLMMetadata,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.llms.openai_like import OpenAILike
class LlamaAPI(OpenAILike):
"""
LlamaAPI LLM.
Examples:
`pip install llama-index-llms-llama-api`
```python
from llama_index.llms.llama_api import LlamaAPI
# Obtain an API key from https://www.llama-api.com/
api_key = "your-api-key"
llm = LlamaAPI(model="llama3.1-8b", context_window=128000, is_function_calling_model=True, api_key=api_key)
# Call the complete method with a prompt
resp = llm.complete("Paul Graham is ")
print(resp)
```
"""
model: str = Field(
default="llama3.1-8b",
description=LLMMetadata.model_fields["model_name"].description,
)
api_base: str = Field(
default="https://api.llmapi.com/",
description="The base URL for OpenAI API.",
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
is_function_calling_model: bool = Field(
default=False,
description=LLMMetadata.model_fields["is_function_calling_model"].description,
)
@classmethod
def class_name(cls) -> str:
return "llama_api_llm"
|
from llama_index.core.base.llms.types import (
LLMMetadata,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.llms.openai_like import OpenAILike
class LlamaAPI(OpenAILike):
"""LlamaAPI LLM.
Examples:
`pip install llama-index-llms-llama-api`
```python
from llama_index.llms.llama_api import LlamaAPI
# Obtain an API key from https://www.llama-api.com/
api_key = "your-api-key"
llm = LlamaAPI(model="llama3.1-8b", context_window=128000, is_function_calling_model=True, api_key=api_key)
# Call the complete method with a prompt
resp = llm.complete("Paul Graham is ")
print(resp)
```
"""
model: str = Field(
default="llama3.1-8b",
description=LLMMetadata.model_fields["model_name"].description,
)
api_base: str = Field(
default="https://api.llmapi.com/",
description="The base URL for OpenAI API.",
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
is_function_calling_model: bool = Field(
default=False,
description=LLMMetadata.model_fields["is_function_calling_model"].description,
)
@classmethod
def class_name(cls) -> str:
return "llama_api_llm"
|
from docarray.array.array.array import DocArray
from docarray.array.stacked.array_stacked import DocArrayStacked
__all__ = ['DocArray', 'DocArrayStacked']
|
from docarray.array.array.array import DocumentArray
from docarray.array.stacked.array_stacked import DocumentArrayStacked
__all__ = ['DocumentArray', 'DocumentArrayStacked']
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src.testing import test_case
class SpatialDropoutTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_1d(self):
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": False},
input_shape=(2, 3, 4),
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_2d(self):
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_3d(self):
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
assert_built_after_instantiation=True,
)
def test_spatial_dropout_1D_dynamic(self):
inputs = layers.Input((3, 2))
layer = layers.SpatialDropout1D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_1D_correctness(self):
inputs = np.ones((10, 3, 10))
layer = layers.SpatialDropout1D(0.5)
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_spatial_dropout_2D_dynamic(self):
inputs = layers.Input((3, 2, 4))
layer = layers.SpatialDropout2D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_2D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3))
layer = layers.SpatialDropout2D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, :], outputs[:, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0], outputs[:, :, 1, 1])
def test_spatial_dropout_3D_dynamic(self):
inputs = layers.Input((3, 2, 4, 2))
layer = layers.SpatialDropout3D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_3D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3, 3))
layer = layers.SpatialDropout3D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, 0, :], outputs[:, 1, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0, 0], outputs[:, :, 1, 1, 1])
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src.testing import test_case
class SpatialDropoutTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_1d(self):
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4),
)
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": False},
input_shape=(2, 3, 4),
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_2d(self):
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
)
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_3d(self):
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
)
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
)
def test_spatial_dropout_1D_dynamic(self):
inputs = layers.Input((3, 2))
layer = layers.SpatialDropout1D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_1D_correctness(self):
inputs = np.ones((10, 3, 10))
layer = layers.SpatialDropout1D(0.5)
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_spatial_dropout_2D_dynamic(self):
inputs = layers.Input((3, 2, 4))
layer = layers.SpatialDropout2D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_2D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3))
layer = layers.SpatialDropout2D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, :], outputs[:, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0], outputs[:, :, 1, 1])
def test_spatial_dropout_3D_dynamic(self):
inputs = layers.Input((3, 2, 4, 2))
layer = layers.SpatialDropout3D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_3D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3, 3))
layer = layers.SpatialDropout3D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, 0, :], outputs[:, 1, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0, 0], outputs[:, :, 1, 1, 1])
|
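A short hedged sketch of the property the correctness tests check: spatial dropout shares one mask across the non-channel axes, so a dropped feature is zero at every timestep.
import numpy as np
from keras.src import layers, ops

x = np.ones((4, 6, 8))  # (batch, timesteps, features)
out = ops.convert_to_numpy(layers.SpatialDropout1D(0.5)(x, training=True))

# Every timestep sees the same mask, as in test_spatial_dropout_1D_correctness.
assert np.allclose(out[:, 0, :], out[:, 1, :])
# Features are either dropped everywhere or kept and rescaled by 1 / (1 - rate).
print(np.unique(out))  # expected: [0., 2.]
|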
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
import os
import platform
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux") and platform.machine() == "x86_64":
triton_requirement = "triton==2.0.0"
try:
import re
import subprocess
version_line = (
subprocess.check_output(["nvcc", "--version"]).strip().split(b"\n")[-1]
)
major, minor = re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]
if (int(major), int(minor)) < (11, 4):
# the last version supporting CUDA < 11.4
triton_requirement = "triton==2.0.0.dev20221011"
except (IndexError, OSError, subprocess.SubprocessError):
pass
requirements.append(triton_requirement)
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.8",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=requirements
+ [
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
|
import os
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux"):
triton_requirement = "triton>=2.0.0.dev20221202"
try:
import re
import subprocess
version_line = (
subprocess.check_output(["nvcc", "--version"]).strip().split(b"\n")[-1]
)
major, minor = re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]
if (int(major), int(minor)) < (11, 4):
# the last version supporting CUDA < 11.4
triton_requirement = "triton==2.0.0.dev20221011"
except (IndexError, OSError, subprocess.SubprocessError):
pass
requirements.append(triton_requirement)
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.8",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=requirements
+ [
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
|
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from pydantic import BaseModel
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Params(BaseModel, extra="allow"):
"""Parameters for the Javelin AI Gateway LLM."""
temperature: float = 0.0
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class JavelinAIGateway(LLM):
"""Javelin AI Gateway LLMs.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain_community.llms import JavelinAIGateway
completions = JavelinAIGateway(
gateway_uri="<your-javelin-ai-gateway-uri>",
route="<your-javelin-ai-gateway-completions-route>",
params={
"temperature": 0.1
}
)
"""
route: str
"""The route to use for the Javelin AI Gateway API."""
client: Optional[Any] = None
"""The Javelin AI Gateway client."""
gateway_uri: Optional[str] = None
"""The URI of the Javelin AI Gateway API."""
params: Optional[Params] = None
"""Parameters for the Javelin AI Gateway API."""
javelin_api_key: Optional[str] = None
"""The API key for the Javelin AI Gateway API."""
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri, api_key=self.javelin_api_key
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Javelin AI Gateway API."""
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"route": self.route,
"javelin_api_key": self.javelin_api_key,
**(self.params.dict() if self.params else {}),
}
return params
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return self._default_params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Javelin AI Gateway API."""
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
if self.client is not None:
resp = self.client.query_route(self.route, query_body=data)
else:
raise ValueError("Javelin client is not initialized.")
resp_dict = resp.dict()
try:
return resp_dict["llm_response"]["choices"][0]["text"]
except KeyError:
return ""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call async the Javelin AI Gateway API."""
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
if self.client is not None:
resp = await self.client.aquery_route(self.route, query_body=data)
else:
raise ValueError("Javelin client is not initialized.")
resp_dict = resp.dict()
try:
return resp_dict["llm_response"]["choices"][0]["text"]
except KeyError:
return ""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "javelin-ai-gateway"
|
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from pydantic import BaseModel
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Params(BaseModel, extra="allow"): # type: ignore[call-arg]
"""Parameters for the Javelin AI Gateway LLM."""
temperature: float = 0.0
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class JavelinAIGateway(LLM):
"""Javelin AI Gateway LLMs.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain_community.llms import JavelinAIGateway
completions = JavelinAIGateway(
gateway_uri="<your-javelin-ai-gateway-uri>",
route="<your-javelin-ai-gateway-completions-route>",
params={
"temperature": 0.1
}
)
"""
route: str
"""The route to use for the Javelin AI Gateway API."""
client: Optional[Any] = None
"""The Javelin AI Gateway client."""
gateway_uri: Optional[str] = None
"""The URI of the Javelin AI Gateway API."""
params: Optional[Params] = None
"""Parameters for the Javelin AI Gateway API."""
javelin_api_key: Optional[str] = None
"""The API key for the Javelin AI Gateway API."""
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri, api_key=self.javelin_api_key
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Javelin AI Gateway API."""
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"route": self.route,
"javelin_api_key": self.javelin_api_key,
**(self.params.dict() if self.params else {}),
}
return params
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return self._default_params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Javelin AI Gateway API."""
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
if self.client is not None:
resp = self.client.query_route(self.route, query_body=data)
else:
raise ValueError("Javelin client is not initialized.")
resp_dict = resp.dict()
try:
return resp_dict["llm_response"]["choices"][0]["text"]
except KeyError:
return ""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call async the Javelin AI Gateway API."""
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
if self.client is not None:
resp = await self.client.aquery_route(self.route, query_body=data)
else:
raise ValueError("Javelin client is not initialized.")
resp_dict = resp.dict()
try:
return resp_dict["llm_response"]["choices"][0]["text"]
except KeyError:
return ""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "javelin-ai-gateway"
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss', 'EQLV2Loss', 'MarginL2Loss', 'MultiPosCrossEntropyLoss',
'L2Loss', 'TripletLoss'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss', 'EQLV2Loss', 'MarginL2Loss', 'MultiPosCrossEntropyLoss'
]
|
from langchain_core.globals import get_debug, set_debug
def test_debug_is_settable_via_setter() -> None:
from langchain_core import globals as globals_
from langchain_core.callbacks.manager import _get_debug
previous_value = globals_._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_debug(not previous_value)
new_value = globals_._debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
|
from langchain_core.globals import get_debug, set_debug
def test_debug_is_settable_via_setter() -> None:
from langchain_core import globals
from langchain_core.callbacks.manager import _get_debug
previous_value = globals._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_debug(not previous_value)
new_value = globals._debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
|
from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionScheduler
from backend.notifications.notifications import NotificationManager
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
NotificationManager(),
DatabaseManager(),
ExecutionScheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionScheduler
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
DatabaseManager(),
ExecutionScheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
import sys
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.head.request_handling import HeaderRequestHandler
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
runtime_args.host = runtime_args.host[0]
runtime_args.port = runtime_args.port
with AsyncNewLoopRuntime(
args=runtime_args, req_handler_cls=HeaderRequestHandler
) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
import sys
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.head.request_handling import HeaderRequestHandler
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
runtime_args.host = runtime_args.host[0]
runtime_args.port = runtime_args.port
with AsyncNewLoopRuntime(args=runtime_args, req_handler_cls=HeaderRequestHandler) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
agent_image: str | None = None
description: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
store_listing_version_id: str | None = None
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
class ReviewSubmissionRequest(pydantic.BaseModel):
store_listing_version_id: str
is_approved: bool
comments: str
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
description: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
store_listing_version_id: str | None = None
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
class ReviewSubmissionRequest(pydantic.BaseModel):
store_listing_version_id: str
is_approved: bool
comments: str
|
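The models above are plain Pydantic v2 schemas. A minimal construction sketch, assuming the classes are importable as defined; the field values are illustrative only, not data from the original:

import datetime

page = Pagination(total_items=1, total_pages=1, current_page=1, page_size=25)
agent = MyAgent(
    agent_id="agent-123",                       # hypothetical identifier
    agent_version=1,
    agent_name="Example Agent",
    description="Demo listing",
    last_edited=datetime.datetime(2024, 1, 1),
)
response = MyAgentsResponse(agents=[agent], pagination=page)
print(response.model_dump_json())               # Pydantic v2 serialization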
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import fields
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import fields
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(
self._cls
), f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(f.name in ("type", "_type", "create", "value") for f in fields(self)) # type: ignore[arg-type, misc]
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
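The helper above implements a tagged union over dataclass fields. A minimal sketch of how it is meant to be combined with a dataclass; IntOrStr and its fields are illustrative assumptions, not taken from the original:

from dataclasses import dataclass

@dataclass(repr=False)  # repr=False so _Union.__repr__ is used and unset fields are never read
class IntOrStr(_Union):
    as_int: int
    as_str: str

v = IntOrStr.create(as_int=7)  # exactly one keyword selects the active variant
assert v.type == "as_int"      # the tag only compares against valid field names
assert v.value == 7
print(v)                       # -> IntOrStr(as_int=7)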
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`.
The resulting output when using the `"valid"` padding option has a shape of:
    `output_shape = (input_shape - pool_size + 1) / strides`.
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
Args:
pool_size: int, size of the max pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="valid")
>>> max_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=2, padding="valid")
>>> max_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="same")
>>> max_pool_1d(x)
"""
def __init__(
self,
pool_size=2,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`.
The resulting output when using the `"valid"` padding option has a shape of:
    `output_shape = (input_shape - pool_size + 1) / strides`.
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
Args:
pool_size: int, size of the max pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="valid")
>>> max_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=2, padding="valid")
>>> max_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="same")
>>> max_pool_1d(x)
"""
def __init__(
self,
pool_size=2,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
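The docstring above states the output-length formulas for both padding modes. A hedged sketch that checks them numerically, reusing the 5-step input from the docstring examples:

import numpy as np
import keras

x = np.reshape(np.arange(1.0, 6.0), (1, 5, 1))   # (batch, steps, features)
valid = keras.layers.MaxPooling1D(pool_size=2, strides=1, padding="valid")(x)
same = keras.layers.MaxPooling1D(pool_size=2, strides=1, padding="same")(x)
# "valid": (5 - 2 + 1) / 1 = 4 steps; "same": 5 / 1 = 5 steps
print(valid.shape)   # (1, 4, 1)
print(same.shape)    # (1, 5, 1)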