from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
AIML_API = "aiml_api"
ANTHROPIC = "anthropic"
APOLLO = "apollo"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GENERIC_WEBHOOK = "generic_webhook"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HTTP = "http"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
LLAMA_API = "llama_api"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
SMARTLEAD = "smartlead"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
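A small usage sketch (not part of the module above): because `ProviderName` subclasses `str`, members compare equal to their raw string values and can be looked up by value, which is what makes the enum safe to round-trip through JSON.
# Sketch only; `ProviderName` is the enum defined above.
assert ProviderName.ANTHROPIC == "anthropic"          # str-enum compares to its value
assert ProviderName("github") is ProviderName.GITHUB  # lookup by value
print([p.value for p in ProviderName][:3])            # ['aiml_api', 'anthropic', 'apollo']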
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
AIML_API = "aiml_api"
ANTHROPIC = "anthropic"
APOLLO = "apollo"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GENERIC_WEBHOOK = "generic_webhook"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
LLAMA_API = "llama_api"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
SMARTLEAD = "smartlead"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib.util
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
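A minimal quick-start sketch for the public API re-exported above; the checkpoint name is an assumption, any Sentence Transformers model works the same way.
# Sketch only; "all-MiniLM-L6-v2" is an assumed example checkpoint.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = model.encode(["An example sentence", "Another one"])
print(embeddings.shape)  # (2, 384) for this particular checkpoint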
|
from __future__ import annotations
__version__ = "3.4.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib.util
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
from docarray.typing.bytes import ImageBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
[
'AudioTorchTensor',
'TorchEmbedding',
'TorchTensor',
'VideoTorchTensor',
'ImageTorchTensor',
]
)
|
from docarray.typing.bytes import ImageBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor', 'VideoTorchTensor']
)
|
import itertools
import numpy as np
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TorchDataLoaderAdapter(DataAdapter):
"""Adapter that handles `torch.utils.data.DataLoader`."""
def __init__(self, dataloader):
import torch
if not isinstance(dataloader, torch.utils.data.DataLoader):
raise ValueError(
f"Expected argument `dataloader` to be an instance of"
f"`torch.utils.data.DataLoader`. Received: {dataloader}"
)
self._dataloader = dataloader
self._output_signature = None
self._batch_size = dataloader.batch_size
self._num_batches = None
self._partial_batch_size = None
if hasattr(dataloader.dataset, "__len__"):
self._num_batches = len(dataloader)
if self._batch_size is not None:
self._partial_batch_size = (
len(dataloader.dataset) % self._batch_size
)
def get_numpy_iterator(self):
for batch in self._dataloader:
# `np.asarray` shares memory with the torch tensor where possible
yield tuple(
tree.map_structure(lambda x: np.asarray(x.cpu()), batch)
)
def get_jax_iterator(self):
# We use numpy as an intermediary because it is faster.
return self.get_numpy_iterator()
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
if self._output_signature is None:
batches = list(
itertools.islice(
self._dataloader,
data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC,
)
)
self._output_signature = tuple(
data_adapter_utils.get_tensor_spec(batches)
)
return tf.data.Dataset.from_generator(
self.get_numpy_iterator,
output_signature=self._output_signature,
)
def get_torch_dataloader(self):
return self._dataloader
@property
def num_batches(self):
return self._num_batches
@property
def batch_size(self):
return self._batch_size
@property
def has_partial_batch(self):
if self._partial_batch_size:
return self._partial_batch_size > 0
else:
return None
@property
def partial_batch_size(self):
return self._partial_batch_size
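A minimal usage sketch for the adapter above, using only public PyTorch APIs; the toy data shapes are assumptions.
# Sketch only: 10 samples of 4 features with scalar targets (assumed shapes).
import torch
from torch.utils.data import DataLoader, TensorDataset

x = torch.randn(10, 4)
y = torch.randn(10, 1)
loader = DataLoader(TensorDataset(x, y), batch_size=4)

adapter = TorchDataLoaderAdapter(loader)
print(adapter.num_batches)         # 3 (ceil(10 / 4))
print(adapter.partial_batch_size)  # 2 (10 % 4)
for batch in adapter.get_numpy_iterator():
    print([a.shape for a in batch])  # a tuple of numpy arrays per batch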
|
import itertools
import numpy as np
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TorchDataLoaderAdapter(DataAdapter):
"""Adapter that handles `torch.utils.data.DataLoader`."""
def __init__(self, dataloader):
import torch
if not isinstance(dataloader, torch.utils.data.DataLoader):
raise ValueError(
f"Expected argument `dataloader` to be an instance of"
f"`torch.utils.data.DataLoader`. Received: {dataloader}"
)
self._dataloader = dataloader
self._output_signature = None
self._batch_size = dataloader.batch_size
self._num_batches = None
self._partial_batch_size = None
if hasattr(dataloader.dataset, "__len__"):
self._num_batches = len(dataloader)
if self._batch_size is not None:
self._partial_batch_size = (
len(dataloader.dataset) % self._batch_size
)
def get_numpy_iterator(self):
for batch in self._dataloader:
# `np.asarray` shares memory with the torch tensor where possible
yield tuple(
tree.map_structure(lambda x: np.asarray(x.cpu()), batch)
)
def get_jax_iterator(self):
# We use numpy as an intermediary because the conversion
# torch -> numpy -> jax is faster than torch -> jax.
return data_adapter_utils.get_jax_iterator(self.get_numpy_iterator())
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
if self._output_signature is None:
batches = list(
itertools.islice(
self._dataloader,
data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC,
)
)
self._output_signature = tuple(
data_adapter_utils.get_tensor_spec(batches)
)
return tf.data.Dataset.from_generator(
self.get_numpy_iterator,
output_signature=self._output_signature,
)
def get_torch_dataloader(self):
return self._dataloader
@property
def num_batches(self):
return self._num_batches
@property
def batch_size(self):
return self._batch_size
@property
def has_partial_batch(self):
if self._partial_batch_size:
return self._partial_batch_size > 0
else:
return None
@property
def partial_batch_size(self):
return self._partial_batch_size
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
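A brief sketch of the coercion done by `validate` above; the URL is the one from the docstring, and the tensor shape is an illustrative assumption.
# Sketch only: `validate` wraps plain strings and tensors into a Video.
import numpy as np

vid = Video.validate(
    'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
assert vid.url is not None  # the string was coerced into Video(url=...)

vid2 = Video.validate(np.zeros((10, 64, 64, 3)))  # assumed (frames, H, W, C) layout
assert vid2.tensor is not None  # the array was coerced into Video(tensor=...)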
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
from docarray import DocumentArray
from jina import Executor, requests
class ProcessExecutor(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processed'] = True
def _validate_dummy_custom_gateway_response(port, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/').json()
assert resp == expected
def _validate_custom_gateway_process(port, text, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/stream?text={text}').json()
assert resp == expected
|
def _validate_dummy_custom_gateway_response(port, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/').json()
assert resp == expected
def _validate_custom_gateway_process(port, text, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/stream?text={text}').json()
assert resp == expected
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`TextDoc.url`), a str (`TextDoc.text`),
and an AnyEmbedding (`TextDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
    image_doc: ImageDoc
    text_doc: TextDoc
mmdoc = MultiModalDoc(
    image_doc=ImageDoc(url="http://www.jina.ai/image.jpg"),
    text_doc=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
# or
mmdoc.text_doc.bytes_ = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as a `str`.
.. code-block:: python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`TextDoc.url`), a str (`TextDoc.text`),
and an AnyEmbedding (`TextDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
    image_doc: ImageDoc
    text_doc: TextDoc
mmdoc = MultiModalDoc(
    image_doc=ImageDoc(url="http://www.jina.ai/image.jpg"),
    text_doc=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
# or
mmdoc.text_doc.bytes = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as a `str`.
.. code-block:: python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from __future__ import annotations
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET) -> None:
super().__init__(level)
def emit(self, record) -> None:
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s") -> None:
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
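A short usage sketch for the helpers above; the logger name is arbitrary, and `coloredlogs` must be installed for `install_logger` to work.
# Sketch only; "my_app" is a hypothetical logger name.
import logging

logger = logging.getLogger("my_app")
install_logger(logger, level=logging.INFO)
logger.info("colored, tqdm-friendly logging is configured")
logger.notice("NOTICE (level 25) was added by install_logger")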
|
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET) -> None:
super().__init__(level)
def emit(self, record) -> None:
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s") -> None:
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
import re
from langchain_core.output_parsers import BaseOutputParser
class BooleanOutputParser(BaseOutputParser[bool]):
"""Parse the output of an LLM call to a boolean."""
true_val: str = "YES"
"""The string value that should be parsed as True."""
false_val: str = "NO"
"""The string value that should be parsed as False."""
def parse(self, text: str) -> bool:
"""Parse the output of an LLM call to a boolean.
Args:
text: output of a language model
Returns:
boolean
"""
regexp = rf"\b({self.true_val}|{self.false_val})\b"
truthy = {
val.upper()
for val in re.findall(regexp, text, flags=re.IGNORECASE | re.MULTILINE)
}
if self.true_val.upper() in truthy:
if self.false_val.upper() in truthy:
msg = (
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
raise ValueError(msg)
return True
if self.false_val.upper() in truthy:
if self.true_val.upper() in truthy:
msg = (
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
raise ValueError(msg)
return False
msg = (
f"BooleanOutputParser expected output value to include either "
f"{self.true_val} or {self.false_val}. Received {text}."
)
raise ValueError(msg)
@property
def _type(self) -> str:
"""Snake-case string identifier for an output parser type."""
return "boolean_output_parser"
|
import re
from langchain_core.output_parsers import BaseOutputParser
class BooleanOutputParser(BaseOutputParser[bool]):
"""Parse the output of an LLM call to a boolean."""
true_val: str = "YES"
"""The string value that should be parsed as True."""
false_val: str = "NO"
"""The string value that should be parsed as False."""
def parse(self, text: str) -> bool:
"""Parse the output of an LLM call to a boolean.
Args:
text: output of a language model
Returns:
boolean
"""
regexp = rf"\b({self.true_val}|{self.false_val})\b"
truthy = {
val.upper()
for val in re.findall(regexp, text, flags=re.IGNORECASE | re.MULTILINE)
}
if self.true_val.upper() in truthy:
if self.false_val.upper() in truthy:
msg = (
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
raise ValueError(msg)
return True
elif self.false_val.upper() in truthy:
if self.true_val.upper() in truthy:
msg = (
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
raise ValueError(msg)
return False
msg = (
f"BooleanOutputParser expected output value to include either "
f"{self.true_val} or {self.false_val}. Received {text}."
)
raise ValueError(msg)
@property
def _type(self) -> str:
"""Snake-case string identifier for an output parser type."""
return "boolean_output_parser"
|
from docarray import Document, DocumentArray
import numpy as np
import pytest
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_add_ignore_existing_doc_id(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_add_ignore_existing_doc_id',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
]
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[2, 2, 2]),
Document(id='r6', embedding=[4, 4, 4]),
]
)
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc) == len(elastic_doc[:, 'embedding'])
assert len(elastic_doc) == indexed_offset_count
assert len(elastic_doc[:, 'embedding']) == 7
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_add_skip_wrong_data_type_and_fix_offset(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'index_name': 'test_add_skip_wrong_data_type_and_fix_offset',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=1000),
Document(id='1', price=20000),
Document(id='2', price=103000),
]
)
with pytest.raises(IndexError):
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=10000),
Document(id='1', price=20000),
Document(id='3', price=30000),
Document(id='4', price=100000000000), # overflow int32
Document(id='5', price=2000),
Document(id='6', price=100000000000), # overflow int32
Document(id='7', price=30000),
]
)
expected_ids = ['0', '1', '2', '3', '5', '7']
assert len(elastic_doc) == 6
assert len(elastic_doc[:, 'id']) == 6
assert elastic_doc[:, 'id'] == expected_ids
assert elastic_doc._offset2ids.ids == expected_ids
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize("assert_customization_propagation", [True, False])
def test_success_add_bulk_custom_params(
monkeypatch, start_storage, assert_customization_propagation
):
bulk_custom_params = {
'thread_count': 4,
'chunk_size': 100,
'max_chunk_bytes': 104857600,
'queue_size': 4,
}
nrof_docs = 100
def _mock_send_requests(requests, **kwargs):
# Currently only the self._send_requests calls issued by extend()
# receive this customization
if (
not requests[0]['_index'].startswith('offset2id__')
and requests[0]['_op_type'] == 'index'
):
assert kwargs == bulk_custom_params
return [{'index': {'_id': f'r{i}'}} for i in range(nrof_docs)]
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [
('is_true', 'bool'),
('test_long', 'long'),
('test_double', 'double'),
],
'distance': 'l2_norm',
'index_name': 'test_success_add_bulk_custom_params',
},
)
if assert_customization_propagation:
monkeypatch.setattr(elastic_doc, '_send_requests', _mock_send_requests)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
],
**bulk_custom_params,
)
|
from docarray import Document, DocumentArray
import pytest
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_add_ignore_existing_doc_id(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_add_ignore_existing_doc_id',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
]
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[2, 2, 2]),
Document(id='r6', embedding=[4, 4, 4]),
]
)
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc) == len(elastic_doc[:, 'embedding'])
assert len(elastic_doc) == indexed_offset_count
assert len(elastic_doc[:, 'embedding']) == 7
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_add_skip_wrong_data_type_and_fix_offset(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'index_name': 'test_add_skip_wrong_data_type_and_fix_offset',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=1000),
Document(id='1', price=20000),
Document(id='2', price=103000),
]
)
with pytest.raises(IndexError):
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=10000),
Document(id='1', price=20000),
Document(id='3', price=30000),
Document(id='4', price=100000000000), # overflow int32
Document(id='5', price=2000),
Document(id='6', price=100000000000), # overflow int32
Document(id='7', price=30000),
]
)
expected_ids = ['0', '1', '2', '3', '5', '7']
assert len(elastic_doc) == 6
assert len(elastic_doc[:, 'id']) == 6
assert elastic_doc[:, 'id'] == expected_ids
assert elastic_doc._offset2ids.ids == expected_ids
|
"""Load agent."""
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import AGENT_TO_CLASS, load_agent
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
agent: Optional[AgentType] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
*,
tags: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default
to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use. If None and agent is also None,
will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
agent_kwargs: Additional keyword arguments to pass to the underlying agent.
Defaults to None.
tags: Tags to apply to the traced runs. Defaults to None.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If both `agent` and `agent_path` are specified.
ValueError: If `agent` is not a valid agent type.
ValueError: If both `agent` and `agent_path` are None.
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
raise ValueError(
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
if agent is not None:
if agent not in AGENT_TO_CLASS:
raise ValueError(
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
)
try:
# TODO: Add tags from the serialized object directly.
tags_.append(agent_obj._agent_type)
except NotImplementedError:
pass
else:
raise ValueError(
"Somehow both `agent` and `agent_path` are None, this should never happen."
)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
tags=tags_,
**kwargs,
)
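A hedged usage sketch for the function above; the chat model, its name, and the tool are placeholder assumptions (any BaseLanguageModel and BaseTool work the same way).
# Sketch only; assumes the optional langchain-openai package and an API key.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI  # hypothetical provider choice

@tool
def word_count(text: str) -> int:
    """Count the words in the input text."""
    return len(text.split())

llm = ChatOpenAI(model="gpt-4o-mini")  # assumed model name
executor = initialize_agent(
    tools=[word_count],
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    tags=["demo"],
)
result = executor.invoke({"input": "How many words are in 'hello brave new world'?"})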
|
"""Load agent."""
from typing import Any, Optional, Sequence
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import AGENT_TO_CLASS, load_agent
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
agent: Optional[AgentType] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
*,
tags: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default
to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use. If None and agent is also None,
will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
agent_kwargs: Additional keyword arguments to pass to the underlying agent.
Defaults to None.
tags: Tags to apply to the traced runs. Defaults to None.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If both `agent` and `agent_path` are specified.
ValueError: If `agent` is not a valid agent type.
ValueError: If both `agent` and `agent_path` are None.
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
raise ValueError(
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
if agent is not None:
if agent not in AGENT_TO_CLASS:
raise ValueError(
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
)
try:
# TODO: Add tags from the serialized object directly.
tags_.append(agent_obj._agent_type)
except NotImplementedError:
pass
else:
raise ValueError(
"Somehow both `agent` and `agent_path` are None, this should never happen."
)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
tags=tags_,
**kwargs,
)
|
"""
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Questions dataset, a dataset with real questions from Google Search
together with annotated data from Wikipedia providing the answer. For the passages, we encode the
Wikipedia article title together with the individual text passages.
Google Colab Example: https://colab.research.google.com/drive/11GunvCqJuebfeTlgbJWkIMT0xJH6PWF1?usp=sharing
"""
import json
from sentence_transformers import SentenceTransformer, util
import time
import gzip
import os
import torch
# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
model_name = "nq-distilbert-base-v1"
bi_encoder = SentenceTransformer(model_name)
top_k = 5 # Number of passages we want to retrieve with the bi-encoder
# As dataset, we use Simple English Wikipedia. Compared to the full English Wikipedia, it has only
# about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder
wikipedia_filepath = "data/simplewiki-2020-11-01.jsonl.gz"
if not os.path.exists(wikipedia_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz", wikipedia_filepath)
passages = []
with gzip.open(wikipedia_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
data = json.loads(line.strip())
for paragraph in data["paragraphs"]:
# We encode the passages as [title, text]
passages.append([data["title"], paragraph])
# If you like, you can also limit the number of passages you want to use
print("Passages:", len(passages))
# To speed things up, pre-computed embeddings are downloaded.
# The provided file encoded the passages with the model 'nq-distilbert-base-v1'
if model_name == "nq-distilbert-base-v1":
embeddings_filepath = "simplewiki-2020-11-01-nq-distilbert-base-v1.pt"
if not os.path.exists(embeddings_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt", embeddings_filepath)
corpus_embeddings = torch.load(embeddings_filepath)
corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float
device = util.get_device_name()
corpus_embeddings = corpus_embeddings.to(device)
else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU)
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
while True:
query = input("Please enter a question: ")
# Encode the query using the bi-encoder and find potentially relevant passages
start_time = time.time()
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
hits = hits[0] # Get the hits for the first query
end_time = time.time()
# Output of top-k hits
print("Input question:", query)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits:
print("\t{:.3f}\t{}".format(hit["score"], passages[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Questions dataset, a dataset with real questions from Google Search
together with annotated data from Wikipedia providing the answer. For the passages, we encode the
Wikipedia article title together with the individual text passages.
Google Colab Example: https://colab.research.google.com/drive/11GunvCqJuebfeTlgbJWkIMT0xJH6PWF1?usp=sharing
"""
import json
from sentence_transformers import SentenceTransformer, util
import time
import gzip
import os
import torch
# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
model_name = "nq-distilbert-base-v1"
bi_encoder = SentenceTransformer(model_name)
top_k = 5 # Number of passages we want to retrieve with the bi-encoder
# As dataset, we use Simple English Wikipedia. Compared to the full English Wikipedia, it has only
# about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder
wikipedia_filepath = "data/simplewiki-2020-11-01.jsonl.gz"
if not os.path.exists(wikipedia_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz", wikipedia_filepath)
passages = []
with gzip.open(wikipedia_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
data = json.loads(line.strip())
for paragraph in data["paragraphs"]:
# We encode the passages as [title, text]
passages.append([data["title"], paragraph])
# If you like, you can also limit the number of passages you want to use
print("Passages:", len(passages))
# To speed things up, pre-computed embeddings are downloaded.
# The provided file encoded the passages with the model 'nq-distilbert-base-v1'
if model_name == "nq-distilbert-base-v1":
embeddings_filepath = "simplewiki-2020-11-01-nq-distilbert-base-v1.pt"
if not os.path.exists(embeddings_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt", embeddings_filepath)
corpus_embeddings = torch.load(embeddings_filepath)
    corpus_embeddings = corpus_embeddings.float()  # Convert the loaded embeddings to float
device = util.get_device_name()
corpus_embeddings = corpus_embeddings.to(device)
else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU)
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
while True:
query = input("Please enter a question: ")
# Encode the query using the bi-encoder and find potentially relevant passages
start_time = time.time()
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
hits = hits[0] # Get the hits for the first query
end_time = time.time()
# Output of top-k hits
print("Input question:", query)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits:
print("\t{:.3f}\t{}".format(hit["score"], passages[hit["corpus_id"]]))
print("\n\n========\n")
|
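The bi-encoder hits above are retrieved purely by embedding similarity; a common follow-up is to re-score the top-k candidates with a cross-encoder. A minimal sketch of that extra stage, assuming the publicly available `cross-encoder/ms-marco-MiniLM-L-6-v2` checkpoint (this stage is not part of the original script):

from sentence_transformers import CrossEncoder

# Hypothetical re-ranking stage layered on top of `query`, `hits` and `passages` above.
cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")

# Passages are stored as [title, text] pairs, so join them into one string per candidate.
pairs = [(query, " ".join(passages[hit["corpus_id"]])) for hit in hits]
scores = cross_encoder.predict(pairs)

# Re-sort the bi-encoder hits by the cross-encoder scores (highest first).
for score, hit in sorted(zip(scores, hits), key=lambda x: x[0], reverse=True):
    print("\t{:.3f}\t{}".format(score, passages[hit["corpus_id"]]))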
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
        stride_sizes: list[int] | None = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
import json
import os
from typing import List, Optional
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [1, 3, 5],
        stride_sizes: Optional[List[int]] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
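A quick sanity check on the CNN module above: each Conv1d keeps the token axis intact (stride 1 plus symmetric padding for the odd kernel sizes), and the per-kernel outputs are concatenated along the channel axis, so the output dimension is out_channels * len(kernel_sizes). A minimal smoke test, assuming the CNN class above is in scope:

import torch

cnn = CNN(in_word_embedding_dimension=300, out_channels=256, kernel_sizes=[1, 3, 5])
features = {"token_embeddings": torch.rand(8, 12, 300)}  # (batch, tokens, dim)
out = cnn(features)["token_embeddings"]
assert out.shape == (8, 12, 256 * 3)  # token axis preserved, channels concatenated
assert cnn.get_word_embedding_dimension() == 768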
"""Utilities for the XGBoost Dask interface."""
import logging
import warnings
from typing import Any, Dict, Optional, Tuple
import distributed
from ..collective import Config
LOGGER = logging.getLogger("[xgboost.dask]")
def get_n_threads(local_param: Dict[str, Any], worker: "distributed.Worker") -> int:
"""Get the number of threads from a worker and the user-supplied parameters."""
# dask worker nthreads, "state" is available in 2022.6.1
dwnt = worker.state.nthreads if hasattr(worker, "state") else worker.nthreads
n_threads = None
for p in ["nthread", "n_jobs"]:
if local_param.get(p, None) is not None and local_param.get(p, dwnt) != dwnt:
LOGGER.info("Overriding `nthreads` defined in dask worker.")
n_threads = local_param[p]
break
if n_threads == 0 or n_threads is None:
n_threads = dwnt
return n_threads
def get_address_from_user(
dconfig: Optional[Dict[str, Any]], coll_cfg: Config
) -> Tuple[Optional[str], int]:
"""Get the tracker address from the optional user configuration.
Parameters
----------
dconfig :
Dask global configuration.
coll_cfg :
Collective configuration.
Returns
-------
The IP address along with the port number.
"""
valid_config = ["scheduler_address"]
host_ip = None
port = 0
if dconfig is not None:
for k in dconfig:
if k not in valid_config:
raise ValueError(f"Unknown configuration: {k}")
warnings.warn(
(
"Use `coll_cfg` instead of the Dask global configuration store"
f" for the XGBoost tracker configuration: {k}."
),
FutureWarning,
)
else:
dconfig = {}
host_ip = dconfig.get("scheduler_address", None)
if host_ip is not None and host_ip.startswith("[") and host_ip.endswith("]"):
# convert dask bracket format to proper IPv6 address.
host_ip = host_ip[1:-1]
if host_ip is not None:
try:
host_ip, port = distributed.comm.get_address_host_port(host_ip)
except ValueError:
pass
if coll_cfg is None:
coll_cfg = Config()
if coll_cfg.tracker_host_ip is not None:
if host_ip is not None and coll_cfg.tracker_host_ip != host_ip:
raise ValueError(
"Conflicting host IP addresses from the dask configuration and the "
f"collective configuration: {host_ip} v.s. {coll_cfg.tracker_host_ip}."
)
host_ip = coll_cfg.tracker_host_ip
if coll_cfg.tracker_port is not None:
if (
port != 0
and port is not None
and coll_cfg.tracker_port != 0
and port != coll_cfg.tracker_port
):
raise ValueError(
"Conflicting ports from the dask configuration and the "
f"collective configuration: {port} v.s. {coll_cfg.tracker_port}."
)
port = coll_cfg.tracker_port
return host_ip, port
|
"""Utilities for the XGBoost Dask interface."""
import logging
from typing import TYPE_CHECKING, Any, Dict
LOGGER = logging.getLogger("[xgboost.dask]")
if TYPE_CHECKING:
import distributed
def get_n_threads(local_param: Dict[str, Any], worker: "distributed.Worker") -> int:
"""Get the number of threads from a worker and the user-supplied parameters."""
# dask worker nthreads, "state" is available in 2022.6.1
dwnt = worker.state.nthreads if hasattr(worker, "state") else worker.nthreads
n_threads = None
for p in ["nthread", "n_jobs"]:
if local_param.get(p, None) is not None and local_param.get(p, dwnt) != dwnt:
LOGGER.info("Overriding `nthreads` defined in dask worker.")
n_threads = local_param[p]
break
if n_threads == 0 or n_threads is None:
n_threads = dwnt
return n_threads
|
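The precedence implemented by get_n_threads is: an explicit, non-default `nthread`/`n_jobs` from the user wins; a missing, None, or zero value falls back to the dask worker's own thread count. A minimal sketch with a stub worker object (hypothetical, for illustration only):

from types import SimpleNamespace

worker = SimpleNamespace(nthreads=4)  # stub worker without a `.state` attribute
assert get_n_threads({}, worker) == 4                 # fall back to the worker
assert get_n_threads({"n_jobs": 2}, worker) == 2      # explicit user value wins
assert get_n_threads({"nthread": None}, worker) == 4  # None is ignored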
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/segmentation/VOCdevkit/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/segmentation/',
# 'data/': 's3://openmmlab/datasets/segmentation/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
            # VOCDataset will add a different `dataset_type` to dataset.metainfo,
            # which causes an error when using ConcatDataset. Adding
            # `ignore_keys` avoids this error.
ignore_keys=['dataset_type'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline,
backend_args=backend_args),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)
])))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as its default evaluation mode, while
# PASCAL VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
            # VOCDataset will add a different `dataset_type` to dataset.metainfo,
            # which causes an error when using ConcatDataset. Adding
            # `ignore_keys` avoids this error.
ignore_keys=['dataset_type'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as its default evaluation mode, while
# PASCAL VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
|
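Both variants above are meant to be consumed as MMDetection base configs rather than run directly; downstream configs inherit them through `_base_` and override individual keys. A minimal sketch of loading one with MMEngine (assuming the fragment is saved as configs/_base_/datasets/voc0712.py):

from mmengine.config import Config

cfg = Config.fromfile("configs/_base_/datasets/voc0712.py")  # hypothetical path
cfg.train_dataloader.batch_size = 4  # override the default of 2
print(cfg.val_evaluator)  # -> {'type': 'VOCMetric', 'metric': 'mAP', 'eval_mode': '11points'}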
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Image(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for images.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
        dtype (torch.dtype, optional): Desired data type of the image. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the image. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the image. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
            raise ValueError(f"Expected at least a 2-dimensional tensor for an image, got {tensor.ndim} dimensions")
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return cls._wrap(tensor)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
_ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
_ImageTypeJIT = torch.Tensor
_TensorImageType = Union[torch.Tensor, Image]
_TensorImageTypeJIT = torch.Tensor
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Image(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for images.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
        dtype (torch.dtype, optional): Desired data type of the image. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the image. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the image. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Image:
image = tensor.as_subclass(cls)
return image
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
            raise ValueError(f"Expected at least a 2-dimensional tensor for an image, got {tensor.ndim} dimensions")
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return cls._wrap(tensor)
@classmethod
def wrap_like(cls, other: Image, tensor: torch.Tensor) -> Image:
return cls._wrap(tensor)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
_ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
_ImageTypeJIT = torch.Tensor
_TensorImageType = Union[torch.Tensor, Image]
_TensorImageTypeJIT = torch.Tensor
|
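The ndim handling in __new__ above means a bare 2D tensor is promoted to a single-channel CHW image, while anything below 2D is rejected. A minimal sketch, assuming the Image class above is in scope:

import torch

img = Image(torch.rand(32, 32))       # 2D input is promoted ...
assert img.shape == (1, 32, 32)       # ... to a 1-channel CHW tensor
assert isinstance(img, torch.Tensor)  # still usable as a plain tensor

try:
    Image(torch.rand(5))              # fewer than 2 dimensions is rejected
except ValueError:
    pass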
from __future__ import annotations
from sentence_transformers.sparse_encoder.callbacks.splade_callbacks import (
SchedulerType,
SpladeLambdaSchedulerCallback,
)
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import IDF, CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
"IDF",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Callbacks
"SpladeLambdaSchedulerCallback",
"SchedulerType",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import IDF, CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
"IDF",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding3DTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"}, {"data_format": "channels_last"}
)
def test_zero_padding_3d(self, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=((1, 2), (3, 4), (0, 2)), data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2, 3:-4, 0:-2], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, 3:-4, 0:-2, :], inputs)
@parameterized.product(
(
{"padding": ((2, 2), (2, 2), (2, 2))}, # 3 tuples
{"padding": (2, 2, 2)}, # 1 tuple
{"padding": 2}, # 1 int
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_zero_padding_3d_with_same_padding(self, padding, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=padding, data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 2:-2, 2:-2, 2:-2], inputs)
else:
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, 2:-2, 2:-2, :], inputs)
def test_zero_padding_3d_with_dynamic_spatial_dim(self):
if backend.config.image_data_format() == "channels_last":
input_layer = layers.Input(batch_shape=(1, 2, None, 4, 5))
else:
input_layer = layers.Input(batch_shape=(1, 5, 2, None, 4))
padded = layers.ZeroPadding3D(((1, 2), (3, 4), (5, 6)))(input_layer)
if backend.config.image_data_format() == "channels_last":
self.assertEqual(padded.shape, (1, 5, None, 15, 5))
else:
self.assertEqual(padded.shape, (1, 5, 5, None, 15))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2)},
{"padding": (1, 2, 3, 4)},
{"padding": "1"},
{"padding": ((1, 2), (3, 4), (5, 6, 7))},
{"padding": ((1, 2), (3, 4), (5, -6))},
{"padding": ((1, 2), (3, 4), "5")},
)
def test_zero_padding_3d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=padding)
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_3d_get_config(self, data_format):
layer = layers.ZeroPadding3D(padding=(1, 2, 3), data_format=data_format)
expected_config = {
"data_format": data_format,
"dtype": dtype_policies.serialize(layer.dtype_policy),
"name": layer.name,
"padding": ((1, 1), (2, 2), (3, 3)),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class ZeroPadding3DTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"}, {"data_format": "channels_last"}
)
def test_zero_padding_3d(self, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=((1, 2), (3, 4), (0, 2)), data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2, 3:-4, 0:-2], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, 3:-4, 0:-2, :], inputs)
@parameterized.product(
(
{"padding": ((2, 2), (2, 2), (2, 2))}, # 3 tuples
{"padding": (2, 2, 2)}, # 1 tuple
{"padding": 2}, # 1 int
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_zero_padding_3d_with_same_padding(self, padding, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=padding, data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 2:-2, 2:-2, 2:-2], inputs)
else:
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, 2:-2, 2:-2, :], inputs)
def test_zero_padding_3d_with_dynamic_spatial_dim(self):
if backend.config.image_data_format() == "channels_last":
input_layer = layers.Input(batch_shape=(1, 2, None, 4, 5))
else:
input_layer = layers.Input(batch_shape=(1, 5, 2, None, 4))
padded = layers.ZeroPadding3D(((1, 2), (3, 4), (5, 6)))(input_layer)
if backend.config.image_data_format() == "channels_last":
self.assertEqual(padded.shape, (1, 5, None, 15, 5))
else:
self.assertEqual(padded.shape, (1, 5, 5, None, 15))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2)},
{"padding": (1, 2, 3, 4)},
{"padding": "1"},
{"padding": ((1, 2), (3, 4), (5, 6, 7))},
{"padding": ((1, 2), (3, 4), (5, -6))},
{"padding": ((1, 2), (3, 4), "5")},
)
def test_zero_padding_3d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=padding)
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_3d_get_config(self, data_format):
layer = layers.ZeroPadding3D(padding=(1, 2, 3), data_format=data_format)
expected_config = {
"data_format": data_format,
"dtype": layer.dtype_policy.name,
"name": layer.name,
"padding": ((1, 1), (2, 2), (3, 3)),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
|
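The expected shapes in the first test can be verified by hand: for channels_last input of shape (1, 2, 3, 4, 5), padding ((1, 2), (3, 4), (0, 2)) grows the three spatial axes to 2+(1+2)=5, 3+(3+4)=10, and 4+(0+2)=6. A minimal sketch against the public Keras API (assumed to behave identically to the keras.src.layers used by the tests):

import numpy as np
from keras import layers

out = layers.ZeroPadding3D(
    padding=((1, 2), (3, 4), (0, 2)), data_format="channels_last"
)(np.zeros((1, 2, 3, 4, 5)))
assert tuple(out.shape) == (1, 5, 10, 6, 5)  # (batch, 2+3, 3+7, 4+2, channels)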
"""Google Universal Sentence Encoder Embedding Wrapper Module."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks import CallbackManager
# Google Universal Sentence Encoder v5
DEFAULT_HANDLE = "https://tfhub.dev/google/universal-sentence-encoder-large/5"
@deprecated.deprecated(
reason=(
"Should use `llama-index-embeddings-google-genai` instead, using Google's latest unified SDK. "
"See: https://docs.llamaindex.ai/en/stable/examples/embeddings/google_genai/"
)
)
class GoogleUnivSentEncoderEmbedding(BaseEmbedding):
_model: Any = PrivateAttr()
def __init__(
self,
handle: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
):
"""Init params."""
handle = handle or DEFAULT_HANDLE
try:
import tensorflow_hub as hub
model = hub.load(handle)
except ImportError:
raise ImportError(
"Please install tensorflow_hub: `pip install tensorflow_hub`"
)
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model_name=handle,
)
self._model = model
@classmethod
def class_name(cls) -> str:
return "GoogleUnivSentEncoderEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_embedding(query)
# TODO: use proper async methods
async def _aget_text_embedding(self, query: str) -> List[float]:
"""Get text embedding."""
return self._get_embedding(query)
    # TODO: use proper async methods
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_embedding(text)
def _get_embedding(self, text: str) -> List[float]:
vectors = self._model([text]).numpy().tolist()
return vectors[0]
|
"""Google Universal Sentence Encoder Embedding Wrapper Module."""
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks import CallbackManager
# Google Universal Sentence Encoder v5
DEFAULT_HANDLE = "https://tfhub.dev/google/universal-sentence-encoder-large/5"
class GoogleUnivSentEncoderEmbedding(BaseEmbedding):
_model: Any = PrivateAttr()
def __init__(
self,
handle: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
):
"""Init params."""
handle = handle or DEFAULT_HANDLE
try:
import tensorflow_hub as hub
model = hub.load(handle)
except ImportError:
raise ImportError(
"Please install tensorflow_hub: `pip install tensorflow_hub`"
)
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model_name=handle,
)
self._model = model
@classmethod
def class_name(cls) -> str:
return "GoogleUnivSentEncoderEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_embedding(query)
# TODO: use proper async methods
async def _aget_text_embedding(self, query: str) -> List[float]:
"""Get text embedding."""
return self._get_embedding(query)
    # TODO: use proper async methods
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_embedding(text)
def _get_embedding(self, text: str) -> List[float]:
vectors = self._model([text]).numpy().tolist()
return vectors[0]
|
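Both variants expose the standard BaseEmbedding interface, so usage is a one-liner; note that the first call downloads the TF-Hub module, which is large. A hypothetical usage sketch, assuming tensorflow_hub is installed:

embed_model = GoogleUnivSentEncoderEmbedding()  # uses DEFAULT_HANDLE
vector = embed_model.get_query_embedding("What is a sentence encoder?")
print(len(vector))  # USE-large v5 produces 512-dimensional embeddings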
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MimeTypeBasedParser": "langchain_community.document_loaders.parsers.generic",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MimeTypeBasedParser",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MimeTypeBasedParser": "langchain_community.document_loaders.parsers.generic"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MimeTypeBasedParser",
]
|
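The shim above relies on module-level __getattr__ (PEP 562): attribute lookups that miss the module namespace are routed through the hook, letting create_importer warn and lazily import in one place. A minimal standalone sketch of the mechanism (not langchain's actual implementation):

# hypothetical module body, e.g. mypkg/legacy.py
import warnings

_MOVED = {"OldParser": "mypkg.new_home"}  # hypothetical relocation table

def __getattr__(name: str):
    if name in _MOVED:
        warnings.warn(f"{name} moved to {_MOVED[name]}", DeprecationWarning)
        module = __import__(_MOVED[name], fromlist=[name])
        return getattr(module, name)
    raise AttributeError(name)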
from typing import Union
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor as ImageTFTensor,
)
ImageTensor = Union[ImageNdArray] # type: ignore
if tf_available and torch_available:
ImageTensor = Union[ImageNdArray, ImageTorchTensor, ImageTFTensor] # type: ignore
elif tf_available:
ImageTensor = Union[ImageNdArray, ImageTFTensor] # type: ignore
elif torch_available:
ImageTensor = Union[ImageNdArray, ImageTorchTensor] # type: ignore
|
from typing import Union
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
try:
import torch # noqa: F401
except ImportError:
ImageTensor = ImageNdArray
else:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
ImageTensor = Union[ImageNdArray, ImageTorchTensor] # type: ignore
|
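Both versions follow the same pattern: start the Union alias from the always-available baseline and widen it only when an optional backend imports cleanly, so annotations never reference a class from an uninstalled package. A minimal standalone sketch of the pattern:

from typing import Union

import numpy as np

MyTensor = Union[np.ndarray]  # baseline; collapses to plain np.ndarray
try:
    import torch  # optional backend
except ImportError:
    pass
else:
    MyTensor = Union[np.ndarray, torch.Tensor]  # widened when torch exists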
"""LLM Prompt Program."""
from abc import abstractmethod
from typing import Any, Generic, Optional, Type, TypeVar
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.types import BasePydanticProgram, Model
LM = TypeVar("LM")
class BaseLLMFunctionProgram(BasePydanticProgram[BaseModel], Generic[LM]):
"""
Base LLM Prompt Program.
This is a base class for LLM endpoints that can return
a structured output given the prompt.
    NOTE: this currently only works for structured endpoints
    (it does not work for text completion endpoints).
"""
@classmethod
@abstractmethod
def from_defaults(
cls,
output_cls: Type[Model],
prompt_template_str: Optional[str] = None,
prompt: Optional[PromptTemplate] = None,
llm: Optional[LM] = None,
**kwargs: Any,
) -> "BaseLLMFunctionProgram":
"""Initialize program from defaults."""
|
"""LLM Prompt Program."""
from abc import abstractmethod
from typing import Any, Generic, Optional, Type, TypeVar
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.types import BasePydanticProgram, Model
LM = TypeVar("LM")
class BaseLLMFunctionProgram(BasePydanticProgram[BaseModel], Generic[LM]):
"""
Base LLM Prompt Program.
This is a base class for LLM endpoints that can return
a structured output given the prompt.
    NOTE: this currently only works for structured endpoints
    (it does not work for text completion endpoints).
"""
@classmethod
@abstractmethod
def from_defaults(
cls,
output_cls: Type[Model],
prompt_template_str: Optional[str] = None,
prompt: Optional[PromptTemplate] = None,
llm: Optional[LM] = None,
**kwargs: Any,
) -> "BaseLLMFunctionProgram":
"""Initialize program from defaults."""
|
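A concrete subclass has to provide from_defaults plus the call path that prompts the LLM and validates the response into output_cls. A hypothetical skeleton, assuming the names imported in the module above are in scope, that BasePydanticProgram's abstract members are output_cls and __call__, and a pydantic v2 output_cls:

class EchoProgram(BaseLLMFunctionProgram[Any]):
    """Hypothetical program; the 'LLM' here is a plain callable returning JSON."""

    def __init__(self, output_cls: Type[Model], prompt: PromptTemplate, llm: Any):
        self._output_cls, self._prompt, self._llm = output_cls, prompt, llm

    @classmethod
    def from_defaults(cls, output_cls, prompt_template_str=None, prompt=None, llm=None, **kwargs):
        prompt = prompt or PromptTemplate(prompt_template_str)
        return cls(output_cls, prompt, llm)

    @property
    def output_cls(self):
        return self._output_cls

    def __call__(self, *args, **kwargs):
        raw = self._llm(self._prompt.format(**kwargs))    # assumed to return a JSON string
        return self._output_cls.model_validate_json(raw)  # pydantic v2 parsing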
_base_ = './rpn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './panoptic-fpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from __future__ import annotations
import inspect
from typing import Any, Union
import torch
try:
import triton
except ImportError:
triton = None
if triton is not None:
import triton.language as tl
from triton import Config
from triton.compiler import CompiledKernel
from triton.runtime.autotuner import OutOfResources
from triton.runtime.jit import KernelInterface
try:
from triton.runtime.autotuner import PTXASError
except ImportError:
class PTXASError(Exception): # type: ignore[no-redef]
pass
try:
from triton.compiler.compiler import ASTSource
except ImportError:
ASTSource = None
try:
from triton.backends.compiler import GPUTarget
except ImportError:
def GPUTarget(
backend: str,
arch: Union[int, str],
warp_size: int,
) -> Any:
if torch.version.hip:
return [backend, arch, warp_size]
return (backend, arch)
# In the latest triton, math functions were shuffled around into different modules:
# https://github.com/triton-lang/triton/pull/3172
try:
from triton.language.extra import libdevice
libdevice = tl.extra.libdevice # noqa: F811
math = tl.math
except ImportError:
if hasattr(tl.extra, "cuda") and hasattr(tl.extra.cuda, "libdevice"):
libdevice = tl.extra.cuda.libdevice
math = tl.math
elif hasattr(tl.extra, "intel") and hasattr(tl.extra.intel, "libdevice"):
libdevice = tl.extra.intel.libdevice
math = tl.math
else:
libdevice = tl.math
math = tl
try:
from triton.language.standard import _log2
except ImportError:
def _log2(x: Any) -> Any:
raise NotImplementedError
HAS_WARP_SPEC = hasattr(tl, "async_task")
try:
from triton import knobs
except ImportError:
knobs = None
try:
from triton.runtime.cache import triton_key # type: ignore[attr-defined]
except ImportError:
from triton.compiler.compiler import (
triton_key, # type: ignore[attr-defined,no-redef]
)
builtins_use_semantic_kwarg = (
"_semantic" in inspect.signature(triton.language.core.view).parameters
)
HAS_TRITON = True
else:
def _raise_error(*args: Any, **kwargs: Any) -> Any:
raise RuntimeError("triton package is not installed")
class OutOfResources(Exception): # type: ignore[no-redef]
pass
class PTXASError(Exception): # type: ignore[no-redef]
pass
Config = object
CompiledKernel = object
KernelInterface = object
ASTSource = None
GPUTarget = None
_log2 = _raise_error
libdevice = None
math = None
knobs = None
builtins_use_semantic_kwarg = False
class triton: # type: ignore[no-redef]
@staticmethod
def jit(*args: Any, **kwargs: Any) -> Any:
return _raise_error
class tl: # type: ignore[no-redef]
@staticmethod
def constexpr(val: Any) -> Any:
return val
tensor = Any
dtype = Any
HAS_WARP_SPEC = False
triton_key = _raise_error
HAS_TRITON = False
def cc_warp_size(cc: Union[str, int]) -> int:
if torch.version.hip:
cc_str = str(cc)
if "gfx10" in cc_str or "gfx11" in cc_str:
return 32
else:
return 64
else:
return 32
try:
autograd_profiler = torch.autograd.profiler
except AttributeError: # Compile workers only have a mock version of torch
class autograd_profiler: # type: ignore[no-redef]
_is_profiler_enabled = False
__all__ = [
"Config",
"CompiledKernel",
"OutOfResources",
"KernelInterface",
"PTXASError",
"ASTSource",
"GPUTarget",
"tl",
"_log2",
"libdevice",
"math",
"triton",
"cc_warp_size",
"knobs",
"triton_key",
]
|
from __future__ import annotations
import inspect
from typing import Any, Union
import torch
try:
import triton
except ImportError:
triton = None
if triton is not None:
import triton.language as tl
from triton import Config
from triton.compiler import CompiledKernel
from triton.runtime.autotuner import OutOfResources
from triton.runtime.jit import KernelInterface
try:
from triton.runtime.autotuner import PTXASError
except ImportError:
class PTXASError(Exception): # type: ignore[no-redef]
pass
try:
from triton.compiler.compiler import ASTSource
except ImportError:
ASTSource = None
try:
from triton.backends.compiler import GPUTarget
except ImportError:
def GPUTarget(
backend: str,
arch: Union[int, str],
warp_size: int,
) -> Any:
if torch.version.hip:
return [backend, arch, warp_size]
return (backend, arch)
# In the latest triton, math functions were shuffled around into different modules:
# https://github.com/triton-lang/triton/pull/3172
try:
from triton.language.extra import libdevice
libdevice = tl.extra.libdevice # noqa: F811
math = tl.math
except ImportError:
if hasattr(tl.extra, "cuda") and hasattr(tl.extra.cuda, "libdevice"):
libdevice = tl.extra.cuda.libdevice
math = tl.math
elif hasattr(tl.extra, "intel") and hasattr(tl.extra.intel, "libdevice"):
libdevice = tl.extra.intel.libdevice
math = tl.math
else:
libdevice = tl.math
math = tl
try:
from triton.language.standard import _log2
except ImportError:
def _log2(x: Any) -> Any:
raise NotImplementedError
HAS_WARP_SPEC = hasattr(tl, "async_task")
try:
from triton import knobs
except ImportError:
knobs = None
builtins_use_semantic_kwarg = (
"_semantic" in inspect.signature(triton.language.core.view).parameters
)
else:
def _raise_error(*args: Any, **kwargs: Any) -> Any:
raise RuntimeError("triton package is not installed")
class OutOfResources(Exception): # type: ignore[no-redef]
pass
class PTXASError(Exception): # type: ignore[no-redef]
pass
Config = object
CompiledKernel = object
KernelInterface = object
ASTSource = None
GPUTarget = None
_log2 = _raise_error
libdevice = None
math = None
knobs = None
builtins_use_semantic_kwarg = False
class triton: # type: ignore[no-redef]
@staticmethod
def jit(*args: Any, **kwargs: Any) -> Any:
return _raise_error
class tl: # type: ignore[no-redef]
@staticmethod
def constexpr(val: Any) -> Any:
return val
tensor = Any
dtype = Any
HAS_WARP_SPEC = False
def cc_warp_size(cc: Union[str, int]) -> int:
if torch.version.hip:
cc_str = str(cc)
if "gfx10" in cc_str or "gfx11" in cc_str:
return 32
else:
return 64
else:
return 32
try:
autograd_profiler = torch.autograd.profiler
except AttributeError: # Compile workers only have a mock version of torch
class autograd_profiler: # type: ignore[no-redef]
_is_profiler_enabled = False
__all__ = [
"Config",
"CompiledKernel",
"OutOfResources",
"KernelInterface",
"PTXASError",
"ASTSource",
"GPUTarget",
"tl",
"_log2",
"libdevice",
"math",
"triton",
"cc_warp_size",
"knobs",
]
|
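Consumers of this compatibility layer are expected to branch on HAS_TRITON instead of re-attempting the import; the stubs keep annotations and attribute access working when triton is absent. A sketch of the intended call-site pattern (hypothetical consumer code; the module path is an assumption):

from torch._inductor.runtime.triton_compat import HAS_TRITON, tl, triton

if HAS_TRITON:
    @triton.jit
    def add_one(x_ptr, n_elements: tl.constexpr):
        ...  # real kernel body elided
else:
    def add_one(*args, **kwargs):
        raise RuntimeError("this code path requires triton")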
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .LambdaLoss import (
LambdaLoss,
LambdaRankScheme,
NDCGLoss1Scheme,
NDCGLoss2PPScheme,
NDCGLoss2Scheme,
NoWeighingScheme,
)
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
"LambdaLoss",
"NoWeighingScheme",
"NDCGLoss1Scheme",
"NDCGLoss2Scheme",
"LambdaRankScheme",
"NDCGLoss2PPScheme",
]
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
]
|
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
# use caffe img_norm
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './retinanet_r50_fpn_1x_coco.py'
# use caffe img_norm
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
from enum import Enum
from fsspec import AbstractFileSystem
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Protocol, runtime_checkable
import json
import uuid
from docling.document_converter import DocumentConverter
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core import Document as LIDocument
from pydantic import Field
class DoclingReader(BasePydanticReader):
"""
Docling Reader.
Extracts PDF, DOCX, and other document formats into LlamaIndex Documents as either Markdown or JSON-serialized Docling native format.
Args:
export_type (Literal["markdown", "json"], optional): The type to export to. Defaults to "markdown".
doc_converter (DocumentConverter, optional): The Docling converter to use. Default factory: `DocumentConverter`.
md_export_kwargs (Dict[str, Any], optional): Kwargs to use in case of markdown export. Defaults to `{"image_placeholder": ""}`.
        id_func (DocIDGenCallable, optional): Doc ID generation function to use. Default: `_uuid4_doc_id_gen`
"""
class ExportType(str, Enum):
MARKDOWN = "markdown"
JSON = "json"
@runtime_checkable
class DocIDGenCallable(Protocol):
def __call__(self, doc: DLDocument, file_path: str | Path) -> str:
...
@staticmethod
def _uuid4_doc_id_gen(doc: DLDocument, file_path: str | Path) -> str:
return str(uuid.uuid4())
export_type: ExportType = ExportType.MARKDOWN
doc_converter: DocumentConverter = Field(default_factory=DocumentConverter)
md_export_kwargs: Dict[str, Any] = {"image_placeholder": ""}
id_func: DocIDGenCallable = _uuid4_doc_id_gen
def lazy_load_data(
self,
file_path: str | Path | Iterable[str] | Iterable[Path],
extra_info: dict | None = None,
fs: Optional[AbstractFileSystem] = None,
) -> Iterable[LIDocument]:
"""
Lazily load from given source.
Args:
file_path (str | Path | Iterable[str] | Iterable[Path]): Document file source as single str (URL or local file) or pathlib.Path — or iterable thereof
extra_info (dict | None, optional): Any pre-existing metadata to include. Defaults to None.
Returns:
Iterable[LIDocument]: Iterable over the created LlamaIndex documents.
"""
file_paths = (
file_path
if isinstance(file_path, Iterable) and not isinstance(file_path, str)
else [file_path]
)
for source in file_paths:
dl_doc = self.doc_converter.convert(source).document
text: str
if self.export_type == self.ExportType.MARKDOWN:
text = dl_doc.export_to_markdown(**self.md_export_kwargs)
elif self.export_type == self.ExportType.JSON:
text = json.dumps(dl_doc.export_to_dict())
else:
raise ValueError(f"Unexpected export type: {self.export_type}")
li_doc = LIDocument(
doc_id=self.id_func(doc=dl_doc, file_path=source),
text=text,
)
li_doc.metadata = extra_info or {}
yield li_doc
|
from enum import Enum
from fsspec import AbstractFileSystem
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Protocol, runtime_checkable
import json
import uuid
from docling.document_converter import DocumentConverter
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core import Document as LIDocument
from pydantic import Field
class DoclingReader(BasePydanticReader):
"""Docling Reader.
Extracts PDF, DOCX, and other document formats into LlamaIndex Documents as either Markdown or JSON-serialized Docling native format.
Args:
export_type (Literal["markdown", "json"], optional): The type to export to. Defaults to "markdown".
doc_converter (DocumentConverter, optional): The Docling converter to use. Default factory: `DocumentConverter`.
md_export_kwargs (Dict[str, Any], optional): Kwargs to use in case of markdown export. Defaults to `{"image_placeholder": ""}`.
        id_func (DocIDGenCallable, optional): Doc ID generation function to use. Default: `_uuid4_doc_id_gen`
"""
class ExportType(str, Enum):
MARKDOWN = "markdown"
JSON = "json"
@runtime_checkable
class DocIDGenCallable(Protocol):
def __call__(self, doc: DLDocument, file_path: str | Path) -> str:
...
@staticmethod
def _uuid4_doc_id_gen(doc: DLDocument, file_path: str | Path) -> str:
return str(uuid.uuid4())
export_type: ExportType = ExportType.MARKDOWN
doc_converter: DocumentConverter = Field(default_factory=DocumentConverter)
md_export_kwargs: Dict[str, Any] = {"image_placeholder": ""}
id_func: DocIDGenCallable = _uuid4_doc_id_gen
def lazy_load_data(
self,
file_path: str | Path | Iterable[str] | Iterable[Path],
extra_info: dict | None = None,
fs: Optional[AbstractFileSystem] = None,
) -> Iterable[LIDocument]:
"""Lazily load from given source.
Args:
file_path (str | Path | Iterable[str] | Iterable[Path]): Document file source as single str (URL or local file) or pathlib.Path — or iterable thereof
extra_info (dict | None, optional): Any pre-existing metadata to include. Defaults to None.
Returns:
Iterable[LIDocument]: Iterable over the created LlamaIndex documents.
"""
file_paths = (
file_path
if isinstance(file_path, Iterable) and not isinstance(file_path, str)
else [file_path]
)
for source in file_paths:
dl_doc = self.doc_converter.convert(source).document
text: str
if self.export_type == self.ExportType.MARKDOWN:
text = dl_doc.export_to_markdown(**self.md_export_kwargs)
elif self.export_type == self.ExportType.JSON:
text = json.dumps(dl_doc.export_to_dict())
else:
raise ValueError(f"Unexpected export type: {self.export_type}")
li_doc = LIDocument(
doc_id=self.id_func(doc=dl_doc, file_path=source),
text=text,
)
li_doc.metadata = extra_info or {}
yield li_doc
|
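Typical usage goes through the load_data entry point inherited from the reader base class, which drains lazy_load_data into a list. A minimal sketch (hypothetical file path; the first run downloads Docling's conversion models):

reader = DoclingReader(export_type=DoclingReader.ExportType.MARKDOWN)
docs = reader.load_data(file_path="paper.pdf")  # hypothetical local PDF
print(docs[0].text[:200])  # Markdown rendering of the converted document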
# mypy: allow-untyped-defs
import torch._C._lazy
def reset():
"""Resets all metric counters."""
torch._C._lazy._reset_metrics()
def counter_names():
"""Retrieves all the currently active counter names."""
return torch._C._lazy._counter_names()
def counter_value(name: str):
"""Return the value of the counter with the speficied name"""
return torch._C._lazy._counter_value(name)
def metrics_report():
"""Return the combined (lazy core and backend) metric report"""
return torch._C._lazy._metrics_report()
|
# mypy: allow-untyped-defs
import torch._C._lazy
def reset():
"""Resets all metric counters."""
torch._C._lazy._reset_metrics()
def counter_names():
"""Retrieves all the currently active counter names."""
return torch._C._lazy._counter_names()
def counter_value(name: str):
"""Return the value of the counter with the specified name"""
return torch._C._lazy._counter_value(name)
def metrics_report():
"""Return the combined (lazy core and backend) metric report"""
return torch._C._lazy._metrics_report()
|
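A sketch of polling these counters (assuming the module is importable as torch._lazy.metrics and a lazy-tensor backend is active):

import torch._lazy.metrics as metrics

metrics.reset()  # clear counters before the workload
# ... run some lazy-tensor workload here ...
for name in metrics.counter_names():  # then inspect what was recorded
    print(name, metrics.counter_value(name))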
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, List, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from .models import EmbeddingModelWrapper
class ImageTorchEncoder(Executor):
"""
:class:`ImageTorchEncoder` encodes ``Document`` blobs of type `ndarray` (`float32`) and shape
    `B x H x W x C` into `ndarray` of `B x D`,
    where `B` is the batch size and `D` is the dimension of the embedding.
If `use_default_preprocessing=False`, the expected input shape is `B x C x H x W` with `float32` dtype.
Internally, :class:`ImageTorchEncoder` wraps the models from
`torchvision.models`.
https://pytorch.org/vision/stable/models.html
:param model_name: the name of the model. Some of the models:
        ``alexnet``, ``squeezenet1_0``, ``vgg16``,
        ``densenet161``, ``inception_v3``, ``googlenet``,
        ``shufflenet_v2_x1_0``, ``mobilenet_v2``,
        ``mnasnet1_0``, ``resnet18``. See the full list at the link above.
:param device: Which device the model runs on. Can be 'cpu' or 'cuda'
    :param default_traversal_path: Used in the encode method and defines the traversal on the received `DocumentArray`
:param default_batch_size: Defines the batch size for inference on the loaded PyTorch model.
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
default_traversal_path: Tuple = ('r',),
default_batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self.device = device
self.default_batch_size = default_batch_size
self.use_default_preprocessing = use_default_preprocessing
self.default_traversal_path = default_traversal_path
# axis 0 is the batch
self._default_channel_axis = 1
self.model_wrapper = EmbeddingModelWrapper(model_name, device=self.device)
self._preprocess = T.Compose(
[
T.ToPILImage(),
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
        Encode image data into an ndarray of dimension `D` and fill the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
docs_batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_path
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob',
)
self._compute_embeddings(docs_batch_generator)
def _compute_embeddings(self, docs_batch_generator: Iterable) -> None:
with torch.no_grad():
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack(self._preprocess_image(blob_batch))
else:
images = np.stack(blob_batch)
features = self.model_wrapper.compute_embeddings(images)
for doc, embed in zip(document_batch, features):
doc.embedding = embed
    def _preprocess_image(self, images: List[np.ndarray]) -> List[np.ndarray]:
return [self._preprocess(img) for img in images]
|
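A sketch of driving either encoder variant with the Jina 2.x Document API it assumes: blobs are H x W x C float32 arrays when use_default_preprocessing is left on, and the embedding dimension depends on what EmbeddingModelWrapper extracts (hypothetical; e.g. 512 for resnet18 pooled features):

import numpy as np
from jina import Document, DocumentArray

encoder = ImageTorchEncoder(model_name='resnet18')
docs = DocumentArray([Document(blob=np.random.rand(224, 224, 3).astype('float32'))])
encoder.encode(docs=docs, parameters={})
print(docs[0].embedding.shape)  # e.g. (512,) for resnet18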
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Dict, Iterable, Tuple
import numpy as np
import torchvision.transforms as T
import torch
from jina import Executor, requests, DocumentArray
from jina_commons.batching import get_docs_batch_generator
from .models import EmbeddingModelWrapper
class ImageTorchEncoder(Executor):
"""
:class:`ImageTorchEncoder` encodes ``Document`` blobs of type `ndarray` (`float32`) and shape
`B x H x W x C` into `ndarray` of `B x D`.
    Where `B` is the batch size and `D` is the dimension of the embedding.
If `use_default_preprocessing=False`, the expected input shape is `B x C x H x W` with `float32` dtype.
Internally, :class:`ImageTorchEncoder` wraps the models from
`torchvision.models`.
https://pytorch.org/vision/stable/models.html
:param model_name: the name of the model. Some of the models:
        ``alexnet``, ``squeezenet1_0``, ``vgg16``,
``densenet161``, ``inception_v3``, ``googlenet``,
``shufflenet_v2_x1_0``, ``mobilenet_v2``,
        ``mnasnet1_0``, ``resnet18``. See the full list at the link above.
:param device: Which device the model runs on. Can be 'cpu' or 'cuda'
    :param default_traversal_path: Used in the encode method and defines the traversal over the received `DocumentArray`
:param default_batch_size: Defines the batch size for inference on the loaded PyTorch model.
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
model_name: str = 'resnet18',
device: Optional[str] = None,
        default_traversal_path: Tuple = ('r',),
default_batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
if not device:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
self.default_batch_size = default_batch_size
self.use_default_preprocessing = use_default_preprocessing
self.default_traversal_path = default_traversal_path
# axis 0 is the batch
self._default_channel_axis = 1
self.model_wrapper = EmbeddingModelWrapper(model_name)
self._preprocess = T.Compose([
T.ToPILImage(),
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
        Encode image data into an ndarray of dimension `D` and fill in the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
        :param kwargs: Additional keyword arguments.
"""
if docs:
docs_batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.default_traversal_path),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob'
)
self._compute_embeddings(docs_batch_generator)
def _compute_embeddings(self, docs_batch_generator: Iterable) -> None:
with torch.no_grad():
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack(self._preprocess_image(blob_batch))
else:
images = np.stack(blob_batch)
features = self.model_wrapper.compute_embeddings(images)
for doc, embed in zip(document_batch, features):
doc.embedding = embed
    def _preprocess_image(self, images: List[np.ndarray]) -> List[np.ndarray]:
return [self._preprocess(img) for img in images]
|
import os
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, MetadataMode
dispatcher = get_dispatcher(__name__)
class ContextualRerank(BaseNodePostprocessor):
"""
Contextual Reranking model.
Args:
        model (str): Contextual reranking model name. Defaults to 'ctxl-rerank-en-v1-instruct'.
        top_n (int): Top N nodes to return.
        base_url (Optional[str]): Contextual base URL. Defaults to None.
"""
model: str = Field(description="Contextual Reranking model name.")
top_n: int = Field(description="Top N nodes to return.")
base_url: Optional[str] = Field(description="Contextual base url.", default=None)
_client: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "ctxl-rerank-en-v1-instruct",
api_key: Optional[str] = None,
client: Optional[Any] = None,
base_url: Optional[str] = None,
):
super().__init__(top_n=top_n, model=model)
try:
api_key = api_key or os.environ["CONTEXTUAL_API_KEY"]
        except KeyError:
            raise ValueError(
                "Must pass in a Contextual API key or "
                "specify it via the CONTEXTUAL_API_KEY environment variable."
            )
try:
from contextual import ContextualAI
except ImportError:
raise ImportError(
"Cannot import Contextual client package, please `pip install contextual-client`."
)
if client is not None:
self._client = client
else:
try:
self._client = ContextualAI(api_key=api_key, base_url=base_url)
except Exception as e:
raise ValueError(f"Failed to create Contextual client: {e}")
@classmethod
def class_name(cls) -> str:
return "ContextualRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle, nodes=nodes, top_n=self.top_n, model_name=self.model
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
results = self._client.rerank.create(
model=self.model,
top_n=self.top_n,
query=query_bundle.query_str,
documents=texts,
)
new_nodes = []
for result in results.results:
new_node_with_score = NodeWithScore(
node=nodes[result.index].node, score=result.relevance_score
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
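# Hedged usage sketch (illustrative, not part of the module): assumes a valid
# CONTEXTUAL_API_KEY in the environment, the `contextual-client` package, and
# two toy retrieved nodes.
if __name__ == "__main__":
    from llama_index.core.schema import TextNode

    reranker = ContextualRerank(top_n=1)
    nodes = [
        NodeWithScore(node=TextNode(text="Paris is the capital of France."), score=0.2),
        NodeWithScore(node=TextNode(text="Berlin is the capital of Germany."), score=0.3),
    ]
    reranked = reranker.postprocess_nodes(
        nodes, query_bundle=QueryBundle(query_str="What is the capital of France?")
    )
    print([(n.score, n.node.get_content()) for n in reranked])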
|
import os
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, MetadataMode
dispatcher = get_dispatcher(__name__)
class ContextualRerank(BaseNodePostprocessor):
"""
Contextual Reranking model.
Args:
        model (str): Contextual reranking model name. Defaults to 'ctxl-rerank-en-v1-instruct'.
        top_n (int): Top N nodes to return.
        base_url (Optional[str]): Contextual base URL. Defaults to None.
"""
model: str = Field(description="Contextual Reranking model name.")
top_n: int = Field(description="Top N nodes to return.")
base_url: Optional[str] = Field(description="Contextual base url.", default=None)
_client: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "ctxl-rerank-en-v1-instruct",
api_key: Optional[str] = None,
client: Optional[Any] = None,
base_url: Optional[str] = None,
):
super().__init__(top_n=top_n, model=model)
try:
api_key = api_key or os.environ["CONTEXTUAL_API_KEY"]
        except KeyError:
            raise ValueError(
                "Must pass in a Contextual API key or "
                "specify it via the CONTEXTUAL_API_KEY environment variable."
            )
try:
from contextual import ContextualAI
except ImportError:
raise ImportError(
"Cannot import Contextual client package, please `pip install contextual-client`."
)
if client is not None:
self._client = client
else:
try:
self._client = ContextualAI(api_key=api_key, base_url=base_url)
except Exception as e:
raise ValueError(f"Failed to create Contextual client: {e}")
@classmethod
def class_name(cls) -> str:
return "ContextualRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle, nodes=nodes, top_n=self.top_n, model_name=self.model
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
results = self._client.rerank.create(
model=self.model,
top_n=self.top_n,
query=query_bundle.query_str,
documents=texts,
)
new_nodes = []
for result in results.results:
new_node_with_score = NodeWithScore(
node=nodes[result.index].node, score=result.relevance_score
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from docarray import BaseDoc, DocList
from docarray.index import InMemoryExactNNIndex
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
embedding: NdArray[128]
text: str
def test_update_payload():
docs = DocList[SimpleDoc](
[SimpleDoc(embedding=np.random.rand(128), text=f'hey {i}') for i in range(100)]
)
index = InMemoryExactNNIndex[SimpleDoc]()
index.index(docs)
assert index.num_docs() == 100
for doc in docs:
doc.text += '_changed'
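    # Re-indexing the same ids upserts in place: payloads are replaced and
    # the document count stays at 100.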
index.index(docs)
assert index.num_docs() == 100
res = index.find(query=docs[0], search_field='embedding', limit=100)
assert len(res.documents) == 100
for doc in res.documents:
assert '_changed' in doc.text
def test_update_embedding():
docs = DocList[SimpleDoc](
[SimpleDoc(embedding=np.random.rand(128), text=f'hey {i}') for i in range(100)]
)
index = InMemoryExactNNIndex[SimpleDoc]()
index.index(docs)
assert index.num_docs() == 100
new_tensor = np.random.rand(128)
docs[0].embedding = new_tensor
index.index(docs[0])
assert index.num_docs() == 100
res = index.find(query=docs[0], search_field='embedding', limit=100)
assert len(res.documents) == 100
found = False
for doc in res.documents:
if doc.id == docs[0].id:
found = True
assert (doc.embedding == new_tensor).all()
assert found
|
import numpy as np
from docarray import BaseDoc, DocList
from docarray.index import InMemoryExactNNIndex
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
embedding: NdArray[128]
text: str
def test_update_payload():
docs = DocList[SimpleDoc](
[SimpleDoc(embedding=np.random.rand(128), text=f'hey {i}') for i in range(100)]
)
index = InMemoryExactNNIndex[SimpleDoc]()
index.index(docs)
assert index.num_docs() == 100
for doc in docs:
doc.text += '_changed'
index.index(docs)
assert index.num_docs() == 100
res = index.find(query=docs[0], search_field='embedding', limit=100)
assert len(res.documents) == 100
for doc in res.documents:
assert '_changed' in doc.text
def test_update_embedding():
docs = DocList[SimpleDoc](
[SimpleDoc(embedding=np.random.rand(128), text=f'hey {i}') for i in range(100)]
)
index = InMemoryExactNNIndex[SimpleDoc]()
index.index(docs)
assert index.num_docs() == 100
new_tensor = np.random.rand(128)
docs[0].embedding = new_tensor
index.index(docs[0])
assert index.num_docs() == 100
res = index.find(query=docs[0], search_field='embedding', limit=100)
assert len(res.documents) == 100
found = False
for doc in res.documents:
if doc.id == docs[0].id:
found = True
assert (doc.embedding == new_tensor).all()
assert found
|
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
batch = []
if len(batch) > 0:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
extra_columns = {
col: doc.tags.get(col) for col, _ in self._config.columns.items()
}
return PointStruct(
id=self._map_id(doc.id),
payload=dict(
_serialized=doc.to_base64(**self.serialization_config), **extra_columns
),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.http.points_api.get_point(
collection_name=self.collection_name, id=self._map_id(_id)
)
return self._qdrant_to_document(resp.result.payload)
except UnexpectedResponse as response_error:
if response_error.status_code in [404, 400]:
raise KeyError(_id)
def _del_doc_by_id(self, _id: str):
self.client.http.points_api.delete_points(
collection_name=self.collection_name,
wait=True,
points_selector=PointIdsList(points=[self._map_id(_id)]),
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(
points=[self._document_to_qdrant(value)]
),
)
def scan(self) -> Iterator['Document']:
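        # Offset-based pagination: Qdrant's scroll API hands back a
        # `next_page_offset` cursor; keep scrolling until it comes back empty.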
offset = None
while True:
response = self.client.http.points_api.scroll_points(
collection_name=self.collection_name,
scroll_request=ScrollRequest(
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vector=False,
),
)
for point in response.result.points:
yield self._qdrant_to_document(point.payload)
if response.result.next_page_offset:
offset = response.result.next_page_offset
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.recreate_collection(
self.collection_name,
vector_size=self.n_dim,
distance=self.distance,
)
|
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
batch = []
if len(batch) > 0:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
extra_columns = {col: doc.tags.get(col) for col, _ in self._config.columns}
return PointStruct(
id=self._map_id(doc.id),
payload=dict(
_serialized=doc.to_base64(**self.serialization_config), **extra_columns
),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.http.points_api.get_point(
collection_name=self.collection_name, id=self._map_id(_id)
)
return self._qdrant_to_document(resp.result.payload)
except UnexpectedResponse as response_error:
if response_error.status_code in [404, 400]:
raise KeyError(_id)
def _del_doc_by_id(self, _id: str):
self.client.http.points_api.delete_points(
collection_name=self.collection_name,
wait=True,
points_selector=PointIdsList(points=[self._map_id(_id)]),
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(
points=[self._document_to_qdrant(value)]
),
)
def scan(self) -> Iterator['Document']:
offset = None
while True:
response = self.client.http.points_api.scroll_points(
collection_name=self.collection_name,
scroll_request=ScrollRequest(
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vector=False,
),
)
for point in response.result.points:
yield self._qdrant_to_document(point.payload)
if response.result.next_page_offset:
offset = response.result.next_page_offset
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.recreate_collection(
self.collection_name,
vector_size=self.n_dim,
distance=self.distance,
)
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.ops import convert_to_tensor
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
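        # With the default num_oov_indices=1, index 0 is reserved for OOV and
        # adapted terms are indexed by descending frequency: "a" -> 1,
        # "b" -> 2, "c" -> 3, so the unseen "d" maps to 0 below.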
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(not backend.backend() == "tensorflow", reason="tf only")
def test_tensor_as_vocab(self):
vocab = convert_to_tensor(["a", "b", "c", "d"])
data = [["a", "c", "d"], ["d", "z", "b"]]
layer = layers.StringLookup(
vocabulary=vocab,
)
output = layer(data)
self.assertAllClose(output, np.array([[1, 3, 4], [4, 0, 2]]))
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
|
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Value
from datasets.packaged_modules.json.json import Json
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
(
"jsonl_file",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts_field",
{
"field": "field3",
"features": Features(
{"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")}
),
},
),
],
)
def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]}
|
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Value
from datasets.packaged_modules.json.json import Json
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [1, 10], "col_2": [2, 20]}
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
(
"jsonl_file",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts_field",
{
"field": "field3",
"features": Features(
{"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")}
),
},
),
],
)
def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [1, 10], "col_2": [2, 20], "missing_col": [None, None]}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, CdnQueryGenerator,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv, PatchEmbed,
PatchMerging, inverse_sigmoid, nchw_to_nlc,
nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator'
]
|
__version__ = '0.1.0'
from docarray.array import DocumentArray
from docarray.document.document import BaseDocument as Document
from docarray.predefined_document import Image, Mesh3D, PointCloud3D, Text
__all__ = ['Document', 'DocumentArray', 'Image', 'Text', 'Mesh3D', 'PointCloud3D']
|
__version__ = '0.1.0'
from docarray.array import DocumentArray
from docarray.document.document import BaseDocument as Document
from docarray.predefined_document import Image, Text
__all__ = ['Document', 'DocumentArray', 'Image', 'Text']
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
ai_prefix: str = "AI"
"""Prefix to use before AI output."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
"""
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()},
text,
)
regex = r"Action: (.*?)[\n]*Action Input: ([\s\S]*)"
match = re.search(regex, text, re.DOTALL)
if not match:
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
ai_prefix: str = "AI"
"""Prefix to use before AI output."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
"""
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
regex = r"Action: (.*?)[\n]*Action Input: ([\s\S]*)"
match = re.search(regex, text, re.DOTALL)
if not match:
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
|
import torch
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
def __init__(self, to_restore):
self.to_restore = to_restore
def __enter__(self):
return self
def __exit__(self, *args):
global _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
"""Set the return type of torch operations on :class:`~torchvision.tv_tensors.TVTensor`.
This only affects the behaviour of torch operations. It has no effect on
``torchvision`` transforms or functionals, which will always return as
output the same type that was passed as input.
.. warning::
We recommend using :class:`~torchvision.transforms.v2.ToPureTensor` at
the end of your transform pipelines if you use
``set_return_type("TVTensor")``. This will avoid the
        ``__torch_function__`` overhead in the models' ``forward()``.
Can be used as a global flag for the entire program:
.. code:: python
img = tv_tensors.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor (default behaviour)
set_return_type("TVTensor")
img + 2 # This is an Image
or as a context manager to restrict the scope:
.. code:: python
img = tv_tensors.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor
with set_return_type("TVTensor"):
img + 2 # This is an Image
img + 2 # This is a pure Tensor
Args:
return_type (str): Can be "TVTensor" or "Tensor" (case-insensitive).
Default is "Tensor" (i.e. pure :class:`torch.Tensor`).
"""
global _TORCHFUNCTION_SUBCLASS
to_restore = _TORCHFUNCTION_SUBCLASS
try:
_TORCHFUNCTION_SUBCLASS = {"tensor": False, "tvtensor": True}[return_type.lower()]
except KeyError:
raise ValueError(f"return_type must be 'TVTensor' or 'Tensor', got {return_type}") from None
return _ReturnTypeCM(to_restore)
def _must_return_subclass():
return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {
torch.Tensor.clone,
torch.Tensor.to,
torch.Tensor.detach,
torch.Tensor.requires_grad_,
torch.Tensor.pin_memory,
}
|
import torch
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
def __init__(self, to_restore):
self.to_restore = to_restore
def __enter__(self):
return self
def __exit__(self, *args):
global _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
"""Set the return type of torch operations on :class:`~torchvision.tv_tensors.TVTensor`.
This only affects the behaviour of torch operations. It has no effect on
``torchvision`` transforms or functionals, which will always return as
output the same type that was passed as input.
.. warning::
We recommend using :class:`~torchvision.transforms.v2.ToPureTensor` at
the end of your transform pipelines if you use
``set_return_type("TVTensor")``. This will avoid the
        ``__torch_function__`` overhead in the models' ``forward()``.
Can be used as a global flag for the entire program:
.. code:: python
img = tv_tensors.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor (default behaviour)
set_return_type("TVTensor")
img + 2 # This is an Image
or as a context manager to restrict the scope:
.. code:: python
img = tv_tensors.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor
with set_return_type("TVTensor"):
img + 2 # This is an Image
img + 2 # This is a pure Tensor
Args:
return_type (str): Can be "TVTensor" or "Tensor" (case-insensitive).
Default is "Tensor" (i.e. pure :class:`torch.Tensor`).
"""
global _TORCHFUNCTION_SUBCLASS
to_restore = _TORCHFUNCTION_SUBCLASS
try:
_TORCHFUNCTION_SUBCLASS = {"tensor": False, "tvtensor": True}[return_type.lower()]
except KeyError:
raise ValueError(f"return_type must be 'TVTensor' or 'Tensor', got {return_type}") from None
return _ReturnTypeCM(to_restore)
def _must_return_subclass():
return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
|
from ._hubert_datamodule import HuBERTDataModule
__all__ = [
"HuBERTDataModule",
"Wav2Vec2DataModule",
]
|
from ._hubert_datamodule import HuBERTDataModule
__all__ = [
"HuBERTDataModule",
]
|
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.file import FileCallbackHandler
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.callbacks.usage import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
__all__ = [
"dispatch_custom_event",
"adispatch_custom_event",
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
"UsageMetadataCallbackHandler",
"get_usage_metadata_callback",
]
|
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.file import FileCallbackHandler
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
__all__ = [
"dispatch_custom_event",
"adispatch_custom_event",
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
]
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
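# `TestTimeAug` takes the cross product of the transform lists above, so each
# image yields 3 scales x 2 flips = 6 augmented views for `DetTTAModel` to
# merge with NMS.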
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
try:
from kernels import (
Device,
LayerRepository,
register_kernel_mapping,
replace_kernel_forward_from_hub,
use_kernel_forward_from_hub,
)
_hub_kernels_available = True
_KERNEL_MAPPING: dict[str, dict[Union[Device, str], LayerRepository]] = {
"MultiScaleDeformableAttention": {
"cuda": LayerRepository(
repo_id="kernels-community/deformable-detr",
layer_name="MultiScaleDeformableAttention",
)
},
"Llama4TextMoe": {
"cuda": LayerRepository(
# Move to kernels-community/moe once we release.
repo_id="kernels-community/moe",
layer_name="Llama4TextMoe",
)
},
"RMSNorm": {
"cuda": LayerRepository(
repo_id="kernels-community/liger_kernels",
layer_name="LigerRMSNorm",
# revision="pure-layer-test",
)
},
"MLP": {
"cuda": LayerRepository(
repo_id="medmekk/triton-llama-mlp",
layer_name="TritonLlamaMLP",
)
},
}
register_kernel_mapping(_KERNEL_MAPPING)
except ImportError:
    # Stub to make decorators in transformers work when `kernels`
# is not installed.
def use_kernel_forward_from_hub(*args, **kwargs):
def decorator(cls):
return cls
return decorator
class LayerRepository:
def __init__(self, *args, **kwargs):
raise RuntimeError("LayerRepository requires `kernels` to be installed. Run `pip install kernels`.")
def replace_kernel_forward_from_hub(*args, **kwargs):
raise RuntimeError(
"replace_kernel_forward_from_hub requires `kernels` to be installed. Run `pip install kernels`."
)
def register_kernel_mapping(*args, **kwargs):
raise RuntimeError("register_kernel_mapping requires `kernels` to be installed. Run `pip install kernels`.")
_hub_kernels_available = False
def is_hub_kernels_available():
return _hub_kernels_available
__all__ = [
"LayerRepository",
"is_hub_kernels_available",
"use_kernel_forward_from_hub",
"register_kernel_mapping",
"replace_kernel_forward_from_hub",
]
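# Hedged sketch of how the mapping is consumed (illustrative layer, not part
# of this module): decorating a class whose name is registered above lets
# `kernels` swap in the hub forward on matching devices when it is installed;
# otherwise the stub decorator leaves the class untouched.
if __name__ == "__main__":
    import torch

    @use_kernel_forward_from_hub("RMSNorm")
    class MyRMSNorm(torch.nn.Module):
        def forward(self, hidden_states):
            return hidden_states  # placeholder body; the hub kernel may replace it

    print(is_hub_kernels_available())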
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from ..utils import is_torchdynamo_compiling
try:
from kernels import (
Device,
LayerRepository,
register_kernel_mapping,
replace_kernel_forward_from_hub,
)
from kernels import (
use_kernel_forward_from_hub as original_use_kernel_forward_from_hub,
)
_hub_kernels_available = True
_KERNEL_MAPPING: dict[str, dict[Union[Device, str], LayerRepository]] = {
"MultiScaleDeformableAttention": {
"cuda": LayerRepository(
repo_id="kernels-community/deformable-detr",
layer_name="MultiScaleDeformableAttention",
)
},
"Llama4TextMoe": {
"cuda": LayerRepository(
# Move to kernels-community/moe once we release.
repo_id="kernels-community/moe",
layer_name="Llama4TextMoe",
)
},
"RMSNorm": {
"cuda": LayerRepository(
repo_id="kernels-community/triton-layer-norm",
layer_name="LlamaRMSNorm",
revision="pure-layer-test",
)
},
"MLP": {
"cuda": LayerRepository(
repo_id="medmekk/triton-llama-mlp",
layer_name="TritonLlamaMLP",
)
},
}
register_kernel_mapping(_KERNEL_MAPPING)
def use_kernel_forward_from_hub(*args, **kwargs):
"""
Expands `kernels`' `use_kernel_forward_from_hub` to NOT use a kernel at compile time. This should be removed
when `kernels` supports `torch.compile`.
If the layer has a `config` attribute, we can also set `config.disable_custom_kernels = True` to disable the
kernel.
"""
def decorator_with_compile_path(cls):
# Keeps a reference to the original forward method
original_forward = cls.forward
# Applies the original decorator
decorator = original_use_kernel_forward_from_hub(*args, **kwargs)
cls = decorator(cls)
# Replaces the kernel forward with a compile-friendly version
kernel_forward = cls.forward
def forward_with_compile_path(*forward_args, **forward_kwargs):
disable_custom_kernels = hasattr(cls, "config") and getattr(cls.config, "disable_custom_kernels", None)
if is_torchdynamo_compiling() or disable_custom_kernels:
return original_forward(*forward_args, **forward_kwargs)
else:
return kernel_forward(*forward_args, **forward_kwargs)
cls.forward = forward_with_compile_path
return cls
return decorator_with_compile_path
except ImportError:
    # Stub to make decorators in transformers work when `kernels`
# is not installed.
def use_kernel_forward_from_hub(*args, **kwargs):
def decorator(cls):
return cls
return decorator
class LayerRepository:
def __init__(self, *args, **kwargs):
raise RuntimeError("LayerRepository requires `kernels` to be installed. Run `pip install kernels`.")
def replace_kernel_forward_from_hub(*args, **kwargs):
raise RuntimeError(
"replace_kernel_forward_from_hub requires `kernels` to be installed. Run `pip install kernels`."
)
def register_kernel_mapping(*args, **kwargs):
raise RuntimeError("register_kernel_mapping requires `kernels` to be installed. Run `pip install kernels`.")
_hub_kernels_available = False
def is_hub_kernels_available():
return _hub_kernels_available
__all__ = [
"LayerRepository",
"is_hub_kernels_available",
"use_kernel_forward_from_hub",
"register_kernel_mapping",
"replace_kernel_forward_from_hub",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
    BFP takes multi-level features as inputs, gathers them into a single one,
    then refines the gathered feature and scatters the refined results to
multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
        refine_type (str): Type of the refine op, currently supports
[None, 'conv', 'non_local'].
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(BFP, self).__init__(init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
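# Minimal shape check, a hedged sketch assuming four 256-channel FPN levels
# with strides doubling between levels (requires torch and mmcv installed).
if __name__ == '__main__':
    import torch

    bfp = BFP(in_channels=256, num_levels=4, refine_level=2)
    feats = tuple(torch.rand(1, 256, s, s) for s in (64, 32, 16, 8))
    outs = bfp(feats)
    assert all(o.shape == f.shape for o, f in zip(outs, feats))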
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
    BFP takes multi-level features as inputs, gathers them into a single one,
    then refines the gathered feature and scatters the refined results to
multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
        refine_level (int): Index of the level (counted from bottom to top) at
            which the multi-level features are integrated and refined.
        refine_type (str): Type of the refine op; currently supports
            [None, 'conv', 'non_local'].
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(BFP, self).__init__(init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as in the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
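# Usage sketch: with a standard mmdetection checkout this config would be
# launched as below (the config file name is an assumption):
#   python tools/train.py configs/yolo/yolov3_d53_mstrain-416_273e_coco.py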
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as in the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
"""Matrix decomposition algorithms.
These include PCA, NMF, ICA, and more. Most of the algorithms of this module can be
regarded as dimensionality reduction techniques.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ..utils.extmath import randomized_svd
from ._dict_learning import (
DictionaryLearning,
MiniBatchDictionaryLearning,
SparseCoder,
dict_learning,
dict_learning_online,
sparse_encode,
)
from ._factor_analysis import FactorAnalysis
from ._fastica import FastICA, fastica
from ._incremental_pca import IncrementalPCA
from ._kernel_pca import KernelPCA
from ._lda import LatentDirichletAllocation
from ._nmf import (
NMF,
MiniBatchNMF,
non_negative_factorization,
)
from ._pca import PCA
from ._sparse_pca import MiniBatchSparsePCA, SparsePCA
from ._truncated_svd import TruncatedSVD
__all__ = [
"NMF",
"PCA",
"DictionaryLearning",
"FactorAnalysis",
"FastICA",
"IncrementalPCA",
"KernelPCA",
"LatentDirichletAllocation",
"MiniBatchDictionaryLearning",
"MiniBatchNMF",
"MiniBatchSparsePCA",
"SparseCoder",
"SparsePCA",
"TruncatedSVD",
"dict_learning",
"dict_learning_online",
"fastica",
"non_negative_factorization",
"randomized_svd",
"sparse_encode",
]
|
"""Matrix decomposition algorithms.
These include PCA, NMF, ICA, and more. Most of the algorithms of this module can be
regarded as dimensionality reduction techniques.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ..utils.extmath import randomized_svd
from ._dict_learning import (
DictionaryLearning,
MiniBatchDictionaryLearning,
SparseCoder,
dict_learning,
dict_learning_online,
sparse_encode,
)
from ._factor_analysis import FactorAnalysis
from ._fastica import FastICA, fastica
from ._incremental_pca import IncrementalPCA
from ._kernel_pca import KernelPCA
from ._lda import LatentDirichletAllocation
from ._nmf import (
NMF,
MiniBatchNMF,
non_negative_factorization,
)
from ._pca import PCA
from ._sparse_pca import MiniBatchSparsePCA, SparsePCA
from ._truncated_svd import TruncatedSVD
__all__ = [
"DictionaryLearning",
"FastICA",
"IncrementalPCA",
"KernelPCA",
"MiniBatchDictionaryLearning",
"MiniBatchNMF",
"MiniBatchSparsePCA",
"NMF",
"PCA",
"SparseCoder",
"SparsePCA",
"dict_learning",
"dict_learning_online",
"fastica",
"non_negative_factorization",
"randomized_svd",
"sparse_encode",
"FactorAnalysis",
"TruncatedSVD",
"LatentDirichletAllocation",
]
|
import numpy as np
import pytest
import torch
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
NdArrayEmbedding,
TorchEmbedding,
)
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing import AudioTensorFlowTensor, TensorFlowEmbedding
def test_torch_tensors_interop():
t1 = AudioTorchTensor(torch.rand(128))
t2 = TorchEmbedding(torch.rand(128))
t_result = t1 + t2
assert isinstance(t_result, AudioTorchTensor)
assert isinstance(t_result, torch.Tensor)
assert t_result.shape == (128,)
@pytest.mark.tensorflow
def test_tensorflow_tensors_interop():
t1 = AudioTensorFlowTensor(tf.random.normal((128,)))
t2 = TensorFlowEmbedding(tf.random.normal((128,)))
t_result = t1.tensor + t2.tensor
assert isinstance(t_result, tf.Tensor)
assert t_result.shape == (128,)
def test_np_arrays_interop():
t1 = AudioNdArray((128,))
t2 = NdArrayEmbedding((128,))
t_result = t1 + t2
assert isinstance(t_result, AudioNdArray)
assert isinstance(t_result, np.ndarray)
assert t_result.shape == (128,)
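# Note: the TensorFlow test above carries a custom `tensorflow` marker, so
# whether it runs depends on the project's pytest configuration. Assuming this
# file is saved as test_interop.py, one illustrative invocation would be:
#   pytest -m tensorflow test_interop.py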
|
import numpy as np
import torch
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
NdArrayEmbedding,
TorchEmbedding,
)
def test_torch_tensors_interop():
t1 = AudioTorchTensor(torch.rand(128))
t2 = TorchEmbedding(torch.rand(128))
t_result = t1 + t2
assert isinstance(t_result, AudioTorchTensor)
assert isinstance(t_result, torch.Tensor)
assert t_result.shape == (128,)
def test_np_arrays_interop():
t1 = AudioNdArray((128,))
t2 = NdArrayEmbedding((128,))
t_result = t1 + t2
assert isinstance(t_result, AudioNdArray)
assert isinstance(t_result, np.ndarray)
assert t_result.shape == (128,)
|
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from .ContrastiveLoss import SiameseDistanceMetric
from sentence_transformers.SentenceTransformer import SentenceTransformer
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
):
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
        are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
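# Toy illustration (standalone, assumed values) of the hard-pair selection in
# `forward` above: only negatives closer than the farthest positive and
# positives farther than the closest negative contribute to the loss.
if __name__ == "__main__":
    import torch

    dists = torch.tensor([0.1, 0.3, 0.7, 0.6])
    labels = torch.tensor([1, 0, 1, 0])
    negs, poss = dists[labels == 0], dists[labels == 1]
    print(negs[negs < poss.max()])  # tensor([0.3000, 0.6000]): hard negatives
    print(poss[poss > negs.min()])  # tensor([0.7000]): the one hard positive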
|
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from .ContrastiveLoss import SiameseDistanceMetric
from sentence_transformers.SentenceTransformer import SentenceTransformer
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
):
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
        are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses, InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('all-MiniLM-L6-v2')
train_examples = [
InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.OnlineContrastiveLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | mlu | cpu.
"""
if is_cuda_available():
return 'cuda'
elif is_mlu_available():
return 'mlu'
else:
return 'cpu'
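# Usage sketch: allocate roughly 1 GiB on the GPU and report the peak.
# Illustrative only; the memory branch requires a CUDA device.
if __name__ == '__main__':
    print(f'device type: {get_device()}')
    if is_cuda_available():
        x = torch.empty(256, 1024, 1024, device='cuda')  # 256 Mi floats ~ 1 GiB
        print(f'peak GPU memory: {get_max_cuda_memory()} MB')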
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
|
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.PReLU")
class PReLU(Layer):
"""Parametric Rectified Linear Unit activation layer.
Formula:
``` python
f(x) = alpha * x for x < 0
f(x) = x for x >= 0
```
where `alpha` is a learned array with the same shape as x.
Args:
alpha_initializer: Initializer function for the weights.
alpha_regularizer: Regularizer for the weights.
alpha_constraint: Constraint for the weights.
shared_axes: The axes along which to share learnable parameters for the
activation function. For example, if the incoming feature maps are
from a 2D convolution with output shape
`(batch, height, width, channels)`, and you wish to share parameters
across space so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(
self,
alpha_initializer="Zeros",
alpha_regularizer=None,
alpha_constraint=None,
shared_axes=None,
**kwargs,
):
super().__init__(**kwargs)
self.supports_masking = True
self.alpha_initializer = initializers.get(alpha_initializer)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.alpha_constraint = constraints.get(alpha_constraint)
if shared_axes is None:
self.shared_axes = None
elif not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
def build(self, input_shape):
param_shape = list(input_shape[1:])
if self.shared_axes is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.alpha = self.add_weight(
shape=param_shape,
name="alpha",
initializer=self.alpha_initializer,
regularizer=self.alpha_regularizer,
constraint=self.alpha_constraint,
)
# Set input spec
axes = {}
if self.shared_axes:
for i in range(1, len(input_shape)):
if i not in self.shared_axes:
axes[i] = input_shape[i]
self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
self.built = True
def call(self, inputs):
pos = activations.relu(inputs)
neg = -self.alpha * activations.relu(-inputs)
return pos + neg
def get_config(self):
config = super().get_config()
config.update(
{
"alpha_initializer": initializers.serialize(
self.alpha_initializer
),
"alpha_regularizer": regularizers.serialize(
self.alpha_regularizer
),
"alpha_constraint": constraints.serialize(
self.alpha_constraint
),
"shared_axes": self.shared_axes,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
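# Usage sketch (assumed channels-last input): sharing alpha across the two
# spatial axes leaves one learnable parameter per channel.
if __name__ == "__main__":
    import numpy as np

    layer = PReLU(shared_axes=[1, 2])
    y = layer(np.random.randn(2, 8, 8, 4).astype("float32"))
    print(layer.alpha.shape)  # (1, 1, 4): one alpha per channel
    print(y.shape)            # (2, 8, 8, 4): same shape as the input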
|
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.PReLU")
class PReLU(Layer):
"""Parametric Rectified Linear Unit activation layer.
Formula:
``` python
f(x) = alpha * x for x < 0
f(x) = x for x >= 0
```
where `alpha` is a learned array with the same shape as x.
Args:
alpha_initializer: Initializer function for the weights.
alpha_regularizer: Regularizer for the weights.
alpha_constraint: Constraint for the weights.
shared_axes: The axes along which to share learnable parameters for the
activation function. For example, if the incoming feature maps are
from a 2D convolution with output shape
`(batch, height, width, channels)`, and you wish to share parameters
across space so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(
self,
alpha_initializer="Zeros",
alpha_regularizer=None,
alpha_constraint=None,
shared_axes=None,
**kwargs
):
super().__init__(**kwargs)
self.supports_masking = True
self.alpha_initializer = initializers.get(alpha_initializer)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.alpha_constraint = constraints.get(alpha_constraint)
if shared_axes is None:
self.shared_axes = None
elif not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
def build(self, input_shape):
param_shape = list(input_shape[1:])
if self.shared_axes is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.alpha = self.add_weight(
shape=param_shape,
name="alpha",
initializer=self.alpha_initializer,
regularizer=self.alpha_regularizer,
constraint=self.alpha_constraint,
)
# Set input spec
axes = {}
if self.shared_axes:
for i in range(1, len(input_shape)):
if i not in self.shared_axes:
axes[i] = input_shape[i]
self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
self.built = True
def call(self, inputs):
pos = activations.relu(inputs)
neg = -self.alpha * activations.relu(-inputs)
return pos + neg
def get_config(self):
config = super().get_config()
config.update(
{
"alpha_initializer": initializers.serialize(
self.alpha_initializer
),
"alpha_regularizer": regularizers.serialize(
self.alpha_regularizer
),
"alpha_constraint": constraints.serialize(
self.alpha_constraint
),
"shared_axes": self.shared_axes,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class Flowers102(VisionDataset):
"""`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
between 40 and 258 images.
The images have large scale, pose and light variations. In addition, there are categories that
have large variations within the category, and several very similar categories.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a
            transformed version. E.g., ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in the root directory. If the dataset is already downloaded, it is not
            downloaded again.
"""
_download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
_file_dict = { # filename, md5
"image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
"label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
"setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
}
_splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._base_folder = Path(self.root) / "flowers-102"
self._images_folder = self._base_folder / "jpg"
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
from scipy.io import loadmat
set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
image_ids = set_ids[self._splits_map[self._split]].tolist()
labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))
self._labels = []
self._image_files = []
for image_id in image_ids:
self._labels.append(image_id_to_label[image_id])
self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_integrity(self):
if not (self._images_folder.exists() and self._images_folder.is_dir()):
return False
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
if not check_integrity(str(self._base_folder / filename), md5):
return False
return True
def download(self):
if self._check_integrity():
return
download_and_extract_archive(
f"{self._download_url_prefix}{self._file_dict['image'][0]}",
str(self._base_folder),
md5=self._file_dict["image"][1],
)
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
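# Usage sketch (the root path is an assumption; the first run downloads the
# images, roughly 330 MB, and loading the targets requires scipy):
if __name__ == "__main__":
    dataset = Flowers102(root="data", split="val", download=True)
    image, label = dataset[0]
    print(len(dataset), image.size, label)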
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class Flowers102(VisionDataset):
"""`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
between 40 and 258 images.
The images have large scale, pose and light variations. In addition, there are categories that
have large variations within the category, and several very similar categories.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a
            transformed version. E.g., ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in the root directory. If the dataset is already downloaded, it is not
            downloaded again.
"""
_download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
_file_dict = { # filename, md5
"image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
"label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
"setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
}
_splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._base_folder = Path(self.root) / "flowers-102"
self._images_folder = self._base_folder / "jpg"
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
from scipy.io import loadmat
set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
image_ids = set_ids[self._splits_map[self._split]].tolist()
labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))
self._labels = []
self._image_files = []
for image_id in image_ids:
self._labels.append(image_id_to_label[image_id])
self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_integrity(self):
if not (self._images_folder.exists() and self._images_folder.is_dir()):
return False
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
if not check_integrity(str(self._base_folder / filename), md5):
return False
return True
def download(self):
if self._check_integrity():
return
download_and_extract_archive(
f"{self._download_url_prefix}{self._file_dict['image'][0]}",
str(self._base_folder),
md5=self._file_dict["image"][1],
)
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
|
from jina.parsers.helper import add_arg_group
def mixin_head_parser(parser):
"""Mixing in arguments required by head pods and runtimes into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Head')
gp.add_argument(
'--compression',
choices=['NoCompression', 'Deflate', 'Gzip'],
help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, '
'check https://grpc.github.io/grpc/python/grpc.html#compression.',
)
gp.add_argument(
'--uses-before-address',
type=str,
help='The address of the uses-before runtime',
)
gp.add_argument(
'--uses-after-address',
type=str,
        help='The address of the uses-after runtime',
)
gp.add_argument(
'--connection-list',
type=str,
        help='JSON dictionary with a list of connections to configure',
)
gp.add_argument(
'--timeout-send',
type=int,
default=None,
help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default',
)
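# Usage sketch: attach the Head arguments to a plain argparse parser and parse
# an illustrative command line (no running Jina Head is required).
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    mixin_head_parser(parser)
    args = parser.parse_args(['--timeout-send', '6000'])
    print(args.timeout_send)  # 6000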
|
from jina.parsers.helper import add_arg_group
def mixin_head_parser(parser):
"""Mixing in arguments required by head pods and runtimes into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Head')
gp.add_argument(
'--compression',
choices=['NoCompression', 'Deflate', 'Gzip'],
help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, '
'check https://grpc.github.io/grpc/python/grpc.html#compression.',
)
gp.add_argument(
'--uses-before-address',
type=str,
help='The address of the uses-before runtime',
)
gp.add_argument(
'--uses-after-address',
type=str,
        help='The address of the uses-after runtime',
)
gp.add_argument(
'--connection-list',
type=str,
        help='JSON dictionary with a list of connections to configure',
)
gp.add_argument(
'--disable-reduce',
action='store_true',
default=False,
help='Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head',
)
gp.add_argument(
'--timeout-send',
type=int,
default=None,
help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default',
)
|
from typing_extensions import TYPE_CHECKING
from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray, AudioTensor
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray, VideoTensor
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageTensor',
'AudioTensor',
'VideoTensor',
'ImageNdArray',
'ImageBytes',
'VideoBytes',
'AudioBytes',
]
_torch_tensors = [
'TorchTensor',
'TorchEmbedding',
'ImageTorchTensor',
'AudioTorchTensor',
'VideoTorchTensor',
]
_tf_tensors = [
'TensorFlowTensor',
'TensorFlowEmbedding',
'ImageTensorFlowTensor',
'AudioTensorFlowTensor',
'VideoTensorFlowTensor',
]
__all_test__ = __all__ + _torch_tensors
def __getattr__(name: str):
if name in _torch_tensors:
import_library('torch', raise_error=True)
elif name in _tf_tensors:
import_library('tensorflow', raise_error=True)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
import docarray.typing.tensor
tensor_cls = getattr(docarray.typing.tensor, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
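# Illustration of the lazy-import hook above: framework-specific tensor types
# resolve on first attribute access, and only if the framework is installed.
# For example (assuming torch is available):
#   from docarray.typing import TorchTensor  # triggers __getattr__('TorchTensor')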
|
from typing import (
Union,
TYPE_CHECKING,
TypeVar,
Sequence,
Optional,
List,
Dict,
Generator,
Iterable,
Tuple,
ForwardRef,
)
if TYPE_CHECKING:  # pragma: no cover
    import scipy.sparse
    import tensorflow
    import torch
    import numpy as np
    from PIL.Image import Image as PILImage
    from docarray import Document

    ArrayType = TypeVar(
        'ArrayType',
        np.ndarray,
        scipy.sparse.spmatrix,
        tensorflow.SparseTensor,
        tensorflow.Tensor,
        torch.Tensor,
        Sequence[float],
    )
    DocumentContentType = Union[bytes, str, ArrayType]
    ProtoValueType = Optional[Union[str, bool, float]]
    StructValueType = Union[
        ProtoValueType, List[ProtoValueType], Dict[str, ProtoValueType]
    ]
    DocumentArraySourceType = Union[
        Sequence[Document], Document, Generator[Document], Iterable[Document]
    ]
T = TypeVar('T')
AnyDNN = TypeVar(
'AnyDNN'
) #: The type of any implementation of a Deep Neural Network object
DocumentArraySingletonIndexType = Union[int, str]
DocumentArrayMultipleIndexType = Union[
slice, Sequence[int], Sequence[str], Sequence[bool], Ellipsis
]
DocumentArraySingleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType], str
]
DocumentArrayMultipleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType],
Sequence[str],
]
DocumentArrayIndexType = Union[
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArraySingleAttributeType,
DocumentArrayMultipleAttributeType,
]
Image = TypeVar(
'Image',
str,
ForwardRef('np.ndarray'),
ForwardRef('PILImage'),
)
Text = TypeVar('Text', bound=str)
URI = TypeVar('URI', bound=str)
Audio = TypeVar('Audio', str, ForwardRef('np.ndarray'))
Video = TypeVar('Video', str, ForwardRef('np.ndarray'))
Mesh = TypeVar('Mesh', str, ForwardRef('np.ndarray'))
Tabular = TypeVar('Tabular', bound=str)
Blob = TypeVar('Blob', str, bytes)
JSON = TypeVar('JSON', str, dict)
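# Illustration of the DocumentArray index types defined above (`da` is a
# hypothetical DocumentArray):
#   da[0], da['doc-id']                      -> singleton index
#   da[1:3], da[[0, 2]], da[[True, False]]   -> multiple-document index
#   da[0, 'text'], da[:, ['text', 'tags']]   -> attribute selectors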
|
import collections
import json
import os
import string
from typing import Iterable, List
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast whitespace tokenizer. Splits a sentence on white spaces;
    punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> List[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
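# Usage sketch (assumed two-word vocabulary): a token is looked up as-is,
# then with punctuation stripped, then lower-cased.
if __name__ == "__main__":
    tok = WhitespaceTokenizer(vocab=["hello", "world"])
    print(tok.tokenize("Hello, world!"))  # [0, 1]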
|
from typing import Union, Tuple, List, Iterable, Dict
import collections
import string
import os
import json
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast whitespace tokenizer. Splits a sentence on white spaces;
    punctuation is stripped from tokens.
"""
def __init__(self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str) -> List[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, 'whitespacetokenizer_config.json'), 'w') as fOut:
json.dump({'vocab': list(self.word2idx.keys()), 'stop_words': list(self.stop_words), 'do_lower_case': self.do_lower_case}, fOut)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'whitespacetokenizer_config.json'), 'r') as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_model.sh'], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that may override `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.batch(batch_size, traversal_paths):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
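# Usage sketch (illustrative only; assumes the model weights are already
# downloaded): encode one Document carrying a 1-second mono blob at the
# target sample rate.
#   from jina import Document, Flow
#   doc = Document(blob=np.random.randn(44100).astype(np.float32),
#                  tags={'sample_rate': 44100})
#   with Flow().add(uses=AudioCLIPEncoder) as f:
#       f.post(on='/index', inputs=[doc])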
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_model.sh'], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that may override `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.batch(batch_size, traversal_paths):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Image(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for images.
.. note::
In the :ref:`transforms <transforms>`, ``Image`` instances are largely
interchangeable with pure :class:`torch.Tensor`. See
:ref:`this note <passthrough_heuristic>` for more details.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
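# Usage sketch: a 2D tensor is promoted to a single-channel CHW image.
if __name__ == "__main__":
    img = Image(torch.rand(16, 16))
    print(type(img).__name__, tuple(img.shape))  # Image (1, 16, 16)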
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Image(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for images.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
        dtype (torch.dtype, optional): Desired data type of the image. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the image. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the image. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
|
import os
import subprocess
import pytest
from xgboost import testing as tm
pytestmark = [
pytest.mark.skipif(**tm.no_dask()),
pytest.mark.skipif(**tm.no_dask_cuda()),
tm.timeout(60),
]
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training():
script = os.path.join(tm.demo_dir(__file__), "dask", "gpu_training.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.mgpu
def test_dask_sklearn_demo():
script = os.path.join(tm.demo_dir(__file__), "dask", "sklearn_gpu_training.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.mgpu
@pytest.mark.skipif(**tm.no_cupy())
def test_forward_logging_demo():
script = os.path.join(tm.demo_dir(__file__), "dask", "forward_logging.py")
cmd = ["python", script]
subprocess.check_call(cmd)
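# These tests shell out to the bundled Dask demo scripts; an illustrative
# multi-GPU run (the test file name is an assumption) would be:
#   pytest -m mgpu test_dask_demos.py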
|
import os
import subprocess
import pytest
from xgboost import testing as tm
pytestmark = [
pytest.mark.skipif(**tm.no_dask()),
pytest.mark.skipif(**tm.no_dask_cuda()),
tm.timeout(60),
]
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training():
script = os.path.join(tm.demo_dir(__file__), "dask", "gpu_training.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.mgpu
def test_dask_sklearn_demo():
script = os.path.join(tm.demo_dir(__file__), "dask", "sklearn_gpu_training.py")
cmd = ["python", script]
subprocess.check_call(cmd)
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
]
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MSELoss",
]
|
from typing import List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.query.query_transform.base import BaseQueryTransform
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.schema import NodeWithScore, QueryBundle
class TransformRetriever(BaseRetriever):
"""
Transform Retriever.
Takes in an existing retriever and a query transform and runs the query transform
before running the retriever.
"""
def __init__(
self,
retriever: BaseRetriever,
query_transform: BaseQueryTransform,
transform_metadata: Optional[dict] = None,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
) -> None:
self._retriever = retriever
self._query_transform = query_transform
self._transform_metadata = transform_metadata
super().__init__(
callback_manager=callback_manager, object_map=object_map, verbose=verbose
)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
# NOTE: don't include tools for now
return {"query_transform": self._query_transform}
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query_bundle = self._query_transform.run(
query_bundle, metadata=self._transform_metadata
)
return self._retriever.retrieve(query_bundle)
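# Usage sketch (index construction omitted; HyDEQueryTransform is one concrete
# BaseQueryTransform that ships with llama-index):
#   from llama_index.core.indices.query.query_transform import HyDEQueryTransform
#   retriever = TransformRetriever(
#       retriever=index.as_retriever(),
#       query_transform=HyDEQueryTransform(),
#   )
#   nodes = retriever.retrieve("my question")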
|
from typing import List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.query.query_transform.base import BaseQueryTransform
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.schema import NodeWithScore, QueryBundle
class TransformRetriever(BaseRetriever):
"""Transform Retriever.
Takes in an existing retriever and a query transform and runs the query transform
before running the retriever.
"""
def __init__(
self,
retriever: BaseRetriever,
query_transform: BaseQueryTransform,
transform_metadata: Optional[dict] = None,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
) -> None:
self._retriever = retriever
self._query_transform = query_transform
self._transform_metadata = transform_metadata
super().__init__(
callback_manager=callback_manager, object_map=object_map, verbose=verbose
)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
# NOTE: don't include tools for now
return {"query_transform": self._query_transform}
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query_bundle = self._query_transform.run(
query_bundle, metadata=self._transform_metadata
)
return self._retriever.retrieve(query_bundle)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_stsbenchmark.py
OR
python training_stsbenchmark.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
evaluation_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
"""
This example trains BERT (or any other transformer model, such as RoBERTa or DistilBERT) on the STSbenchmark dataset from scratch. It generates sentence embeddings
that can be compared using cosine similarity to measure semantic similarity.
Usage:
python training_stsbenchmark.py
OR
python training_stsbenchmark.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
evaluation_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine"  # the model was trained with dot-product similarity, but the dataset scores are cosine similarities, so we evaluate with cosine
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8430 Spearman: 0.8368
Model Sparsity: Active Dimensions: 81.1, Sparsity Ratio: 0.9973
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine"  # the model was trained with dot-product similarity, but the dataset scores are cosine similarities, so we evaluate with cosine
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
Model Sparsity Stats: Row Non-Zero Mean: 81.0629997253418, Row Sparsity Mean: 0.997344046831131
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import BBOX_SAMPLERS, build_sampler
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class CombinedSampler(BaseSampler):
"""A sampler that combines positive sampler and negative sampler."""
def __init__(self, pos_sampler, neg_sampler, **kwargs):
super(CombinedSampler, self).__init__(**kwargs)
self.pos_sampler = build_sampler(pos_sampler, **kwargs)
self.neg_sampler = build_sampler(neg_sampler, **kwargs)
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
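# Example config (a sketch of how this sampler is typically wired up, e.g. in
# Libra R-CNN; the sub-sampler types must themselves be registered in BBOX_SAMPLERS):
#
#     sampler = dict(
#         type='CombinedSampler',
#         num=512,
#         pos_fraction=0.25,
#         add_gt_as_proposals=True,
#         pos_sampler=dict(type='InstanceBalancedPosSampler'),
#         neg_sampler=dict(type='IoUBalancedNegSampler', floor_thr=-1))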
|
from ..builder import BBOX_SAMPLERS, build_sampler
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class CombinedSampler(BaseSampler):
"""A sampler that combines positive sampler and negative sampler."""
def __init__(self, pos_sampler, neg_sampler, **kwargs):
super(CombinedSampler, self).__init__(**kwargs)
self.pos_sampler = build_sampler(pos_sampler, **kwargs)
self.neg_sampler = build_sampler(neg_sampler, **kwargs)
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
|
from docarray.document.mixins.proto import ProtoMixin
__all__ = ['ProtoMixin']
|
from docarray.document.mixins.proto import ProtoMixin
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = '$WORK_DIR/' + fname
checkpoint = model_info['checkpoint'].strip()
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
    command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
                   f'CPUS_PER_TASK=$CPUS_PER_TASK {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
command_info += f'--cfg-option env_cfg.dist_cfg.port={port} '
command_info += ' &'
commands.append(command_info)
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
    assert args.out or args.run, \
        ('Please specify at least one operation (save or run the '
         'script) with the argument "--out" or "--run"')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
work_dir = 'WORK_DIR=$3 '
commands.append(work_dir)
commands.append('\n')
    cpus_per_task = 'CPUS_PER_TASK=${4:-2} '
    commands.append(cpus_per_task)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
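# Example invocation (illustrative; the script and config file names are placeholders).
# The generated bash script expects: $1=partition, $2=checkpoint dir, $3=work dir,
# $4=CPUs per task (default 2):
#
#     python convert_test_benchmark_script.py configs/benchmark.py --out benchmark_test.sh
#     bash benchmark_test.sh my_partition /path/to/checkpoints /path/to/work_dirs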
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = '$WORK_DIR/' + fname
checkpoint = model_info['checkpoint'].strip()
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
    command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
                   f'CPUS_PER_TASK=$CPUS_PER_TASK {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
command_info += f'--cfg-option env_cfg.dist_cfg.port={port} '
command_info += ' &'
commands.append(command_info)
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
    assert args.out or args.run, \
        ('Please specify at least one operation (save or run the '
         'script) with the argument "--out" or "--run"')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
work_dir = 'WORK_DIR=$3 '
commands.append(work_dir)
commands.append('\n')
    cpus_per_task = 'CPUS_PER_TASK=${4:-2} '
    commands.append(cpus_per_task)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
|
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from langchain.chains.base import Chain
class Route(NamedTuple):
destination: Optional[str]
next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self, inputs: dict[str, Any], callbacks: Callbacks = None
) -> Route:
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects.
:meta private:
"""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key.
:meta private:
"""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
elif route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
else:
raise ValueError(
f"Received invalid destination chain name '{route.destination}'"
)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
elif route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
else:
raise ValueError(
f"Received invalid destination chain name '{route.destination}'"
)
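# Usage sketch (illustrative; assumes ``physics_chain``, ``math_chain`` and
# ``general_chain`` are pre-built Chain instances and ``my_router`` is a concrete
# RouterChain implementation):
#
#     chain = MultiRouteChain(
#         router_chain=my_router,
#         destination_chains={"physics": physics_chain, "math": math_chain},
#         default_chain=general_chain,
#         silent_errors=True,  # fall back to default_chain on unknown destinations
#     )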
|
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from langchain.chains.base import Chain
class Route(NamedTuple):
destination: Optional[str]
next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self, inputs: dict[str, Any], callbacks: Callbacks = None
) -> Route:
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects.
:meta private:
"""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key.
:meta private:
"""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
elif route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
else:
raise ValueError(
f"Received invalid destination chain name '{route.destination}'"
)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
elif route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
else:
raise ValueError(
f"Received invalid destination chain name '{route.destination}'"
)
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
# Note: this transform assumes that the input to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImageTensor())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBox(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImageTensor()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ConvertImageDtype(torch.float)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
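# Usage sketch (illustrative): the presets are plain callables over (image, target)
# pairs, e.g. inside a dataset's transform hook:
#
#     train_tf = DetectionPresetTrain(data_augmentation="hflip", backend="pil", use_v2=False)
#     eval_tf = DetectionPresetEval(backend="pil", use_v2=False)
#     # img, target = train_tf(pil_image, target_dict)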
|
import torch
import transforms as T
class DetectionPresetTrain:
def __init__(self, *, data_augmentation, hflip_prob=0.5, mean=(123.0, 117.0, 104.0)):
if data_augmentation == "hflip":
self.transforms = T.Compose(
[
T.RandomHorizontalFlip(p=hflip_prob),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
]
)
elif data_augmentation == "lsj":
self.transforms = T.Compose(
[
T.ScaleJitter(target_size=(1024, 1024)),
T.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
]
)
elif data_augmentation == "multiscale":
self.transforms = T.Compose(
[
T.RandomShortestSize(
min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333
),
T.RandomHorizontalFlip(p=hflip_prob),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
]
)
elif data_augmentation == "ssd":
self.transforms = T.Compose(
[
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=list(mean)),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
]
)
elif data_augmentation == "ssdlite":
self.transforms = T.Compose(
[
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
]
)
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self):
self.transforms = T.Compose(
[
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
]
)
def __call__(self, img, target):
return self.transforms(img, target)
|
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CERerankingEvaluator import CERerankingEvaluator
from .CESoftmaxAccuracyEvaluator import CESoftmaxAccuracyEvaluator
__all__ = [
"CEBinaryAccuracyEvaluator",
"CEBinaryClassificationEvaluator",
"CECorrelationEvaluator",
"CEF1Evaluator",
"CESoftmaxAccuracyEvaluator",
"CERerankingEvaluator",
]
|
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CESoftmaxAccuracyEvaluator import CESoftmaxAccuracyEvaluator
from .CERerankingEvaluator import CERerankingEvaluator
__all__ = [
"CEBinaryAccuracyEvaluator",
"CEBinaryClassificationEvaluator",
"CECorrelationEvaluator",
"CEF1Evaluator",
"CESoftmaxAccuracyEvaluator",
"CERerankingEvaluator",
]
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run inference for pre-processed data with a trained model.
"""
import datetime as dt
import logging
from fairseq import options
from interactive_asr.utils import add_asr_eval_argument, get_microphone_transcription, setup_asr, transcribe_file
def main(args):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
task, generator, models, sp, tgt_dict = setup_asr(args, logger)
print("READY!")
if args.input_file:
transcription_time, transcription = transcribe_file(args, task, generator, models, sp, tgt_dict)
print("transcription:", transcription)
print("transcription_time:", transcription_time)
else:
for transcription in get_microphone_transcription(args, task, generator, models, sp, tgt_dict):
print("{}: {}".format(dt.datetime.now().strftime("%H:%M:%S"), transcription[0][0]))
def cli_main():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run inference for pre-processed data with a trained model.
"""
import datetime as dt
import logging
from fairseq import options
from interactive_asr.utils import (
add_asr_eval_argument,
get_microphone_transcription,
setup_asr,
transcribe_file,
)
def main(args):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
task, generator, models, sp, tgt_dict = setup_asr(args, logger)
print("READY!")
if args.input_file:
transcription_time, transcription = transcribe_file(args, task, generator, models, sp, tgt_dict)
print("transcription:", transcription)
print("transcription_time:", transcription_time)
else:
for transcription in get_microphone_transcription(args, task, generator, models, sp, tgt_dict):
print("{}: {}".format(dt.datetime.now().strftime("%H:%M:%S"), transcription[0][0]))
def cli_main():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadPanopticAnnotations'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
seg_prefix=data_root + 'annotations/panoptic_train2017/',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
metric='PQ')
test_evaluator = val_evaluator
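# This dataset config is normally pulled into a model config via ``_base_``
# (a sketch; the relative path depends on where the model config lives):
#
#     _base_ = [
#         '../_base_/datasets/coco_panoptic.py',
#         '../_base_/default_runtime.py',
#     ]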
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'annotations/panoptic_train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['PQ'])
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
from ._mask import Mask
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
from ._dataset_wrapper import wrap_dataset_for_transforms_v2 # type: ignore[attr-defined] # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
from ._mask import Mask
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
from ._dataset_wrapper import wrap_dataset_for_transforms_v2 # type: ignore[attr-defined] # usort: skip
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = log(1 + sum(exp(scale * (s(k,l) - s(i,j)))))``, where ``(i,j)`` and ``(k,l)`` are any of the input
        pairs in the batch such that the expected similarity of ``(i,j)`` is greater than that of ``(k,l)``. The
        summation runs over all pairs of input pairs in the batch that satisfy this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> Dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = log(1 + sum(exp(scale * (s(k,l) - s(i,j)))))``, where ``(i,j)`` and ``(k,l)`` are any of the input
        pairs in the batch such that the expected similarity of ``(i,j)`` is greater than that of ``(k,l)``. The
        summation runs over all pairs of input pairs in the batch that satisfy this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestDynamicRoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
@parameterized.expand(['cpu', 'cuda'])
def test_dynamic_roi_head_loss(self, device):
"""Tests trident roi head predict."""
if not torch.cuda.is_available() and device == 'cuda':
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.to(device=device)
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device=device))
image_shapes = [(3, s, s)]
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device=device))
proposals_list[i] = proposals_list[i].to(device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
loss_cls = out['loss_cls']
loss_bbox = out['loss_bbox']
self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device=device))
proposals_list[i] = proposals_list[i].to(device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
empty_cls_loss = out['loss_cls']
empty_bbox_loss = out['loss_bbox']
self.assertGreater(empty_cls_loss.sum(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_bbox_loss.sum(), 0,
'there should be no box loss when there are no true boxes')
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestDynamicRoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
# TODO: CI Failed
def test_dynamic_roi_head_loss(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
proposals_list[i] = proposals_list[i].to(device='cuda')
out = roi_head.loss(feats, proposals_list, batch_data_samples)
loss_cls = out['loss_cls']
loss_bbox = out['loss_bbox']
self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
proposals_list[i] = proposals_list[i].to(device='cuda')
out = roi_head.loss(feats, proposals_list, batch_data_samples)
empty_cls_loss = out['loss_cls']
empty_bbox_loss = out['loss_bbox']
self.assertGreater(empty_cls_loss.sum(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_bbox_loss.sum(), 0,
'there should be no box loss when there are no true boxes')
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import PixArtTransformer2DModel, Transformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
slow,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PixArtTransformer2DModelTests(ModelTesterMixin, unittest.TestCase):
model_class = PixArtTransformer2DModel
main_input_name = "hidden_states"
# We override the items here because the transformer under consideration is small.
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 4
in_channels = 4
sample_size = 8
scheduler_num_train_steps = 1000
cross_attention_dim = 8
seq_len = 8
hidden_states = floats_tensor((batch_size, in_channels, sample_size, sample_size)).to(torch_device)
timesteps = torch.randint(0, scheduler_num_train_steps, size=(batch_size,)).to(torch_device)
encoder_hidden_states = floats_tensor((batch_size, seq_len, cross_attention_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": timesteps,
"encoder_hidden_states": encoder_hidden_states,
"added_cond_kwargs": {"aspect_ratio": None, "resolution": None},
}
@property
def input_shape(self):
return (4, 8, 8)
@property
def output_shape(self):
return (8, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 8,
"num_layers": 1,
"patch_size": 2,
"attention_head_dim": 2,
"num_attention_heads": 2,
"in_channels": 4,
"cross_attention_dim": 8,
"out_channels": 8,
"attention_bias": True,
"activation_fn": "gelu-approximate",
"num_embeds_ada_norm": 8,
"norm_type": "ada_norm_single",
"norm_elementwise_affine": False,
"norm_eps": 1e-6,
"use_additional_conditions": False,
"caption_channels": None,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
super().test_output(
expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
)
def test_gradient_checkpointing_is_applied(self):
expected_set = {"PixArtTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
def test_correct_class_remapping_from_dict_config(self):
init_dict, _ = self.prepare_init_args_and_inputs_for_common()
model = Transformer2DModel.from_config(init_dict)
assert isinstance(model, PixArtTransformer2DModel)
def test_correct_class_remapping_from_pretrained_config(self):
config = PixArtTransformer2DModel.load_config("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer")
model = Transformer2DModel.from_config(config)
assert isinstance(model, PixArtTransformer2DModel)
@slow
def test_correct_class_remapping(self):
model = Transformer2DModel.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer")
assert isinstance(model, PixArtTransformer2DModel)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import PixArtTransformer2DModel, Transformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
slow,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PixArtTransformer2DModelTests(ModelTesterMixin, unittest.TestCase):
model_class = PixArtTransformer2DModel
main_input_name = "hidden_states"
# We override the items here because the transformer under consideration is small.
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 4
in_channels = 4
sample_size = 8
scheduler_num_train_steps = 1000
cross_attention_dim = 8
seq_len = 8
hidden_states = floats_tensor((batch_size, in_channels, sample_size, sample_size)).to(torch_device)
timesteps = torch.randint(0, scheduler_num_train_steps, size=(batch_size,)).to(torch_device)
encoder_hidden_states = floats_tensor((batch_size, seq_len, cross_attention_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": timesteps,
"encoder_hidden_states": encoder_hidden_states,
"added_cond_kwargs": {"aspect_ratio": None, "resolution": None},
}
@property
def input_shape(self):
return (4, 8, 8)
@property
def output_shape(self):
return (8, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 8,
"num_layers": 1,
"patch_size": 2,
"attention_head_dim": 2,
"num_attention_heads": 2,
"in_channels": 4,
"cross_attention_dim": 8,
"out_channels": 8,
"attention_bias": True,
"activation_fn": "gelu-approximate",
"num_embeds_ada_norm": 8,
"norm_type": "ada_norm_single",
"norm_elementwise_affine": False,
"norm_eps": 1e-6,
"use_additional_conditions": False,
"caption_channels": None,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
super().test_output(
expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
)
def test_gradient_checkpointing_is_applied(self):
expected_set = {"PixArtTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
def test_correct_class_remapping_from_dict_config(self):
init_dict, _ = self.prepare_init_args_and_inputs_for_common()
model = Transformer2DModel.from_config(init_dict)
assert isinstance(model, PixArtTransformer2DModel)
def test_correct_class_remapping_from_pretrained_config(self):
config = PixArtTransformer2DModel.load_config("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer")
model = Transformer2DModel.from_config(config)
assert isinstance(model, PixArtTransformer2DModel)
@slow
def test_correct_class_remapping(self):
model = Transformer2DModel.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer")
assert isinstance(model, PixArtTransformer2DModel)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
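# Quick numeric check (illustrative): with beta=1.0 the loss is quadratic for
# small residuals and linear for large ones:
#   |pred - target| = 0.5  ->  0.5 * 0.5**2 / 1.0 = 0.125
#   |pred - target| = 2.0  ->  2.0 - 0.5 * 1.0    = 1.5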
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@MODELS.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@MODELS.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from mmdet.registry import MODELS
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@MODELS.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@MODELS.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
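if __name__ == '__main__':
    # Illustrative sketch of the weighted/averaged path, not part of the
    # original module (assumes torch plus the mmcv/mmdet helpers imported
    # above; the relative import means this only runs in package context).
    # With diff == 1.0 and beta == 1.0, each element contributes
    # 1.0 - 0.5 = 0.5.
    pred = torch.zeros(4)
    target = torch.ones(4)
    weight = torch.tensor([1.0, 1.0, 0.0, 0.0])  # mask out two elements
    crit = SmoothL1Loss(beta=1.0, reduction='mean', loss_weight=2.0)
    # weighted sum (0.5 + 0.5) / avg_factor 2.0, scaled by loss_weight 2.0
    print(crit(pred, target, weight=weight, avg_factor=2.0))
    # expected: tensor(1.)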
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by Spacy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
        :param traversal_paths: fallback traversal paths, used when the request does not specify any
        :param batch_size: fallback batch size, used when the request does not specify one
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.batch_size)
document_batches_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=batch_size,
require_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
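if __name__ == '__main__':
    # Minimal usage sketch, not part of the original executor (assumes `jina`
    # and `spacy` are installed and the model download succeeds).
    from jina import Document

    encoder = SpacyTextEncoder(model_name='en_core_web_sm')
    docs = DocumentArray([Document(text='hello world'), Document(text='jina')])
    encoder.encode(docs=docs, parameters={})
    print(docs[0].embedding.shape)  # e.g. (96,) for en_core_web_sm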
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by Spacy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
        :param traversal_paths: fallback traversal paths, used when the request does not specify any
        :param batch_size: fallback batch size, used when the request does not specify one
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.batch_size)
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=batch_size,
needs_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
|
from langchain_core.runnables.base import (
Other,
Runnable,
RunnableBinding,
RunnableBindingBase,
RunnableEach,
RunnableEachBase,
RunnableGenerator,
RunnableLambda,
RunnableLike,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
coerce_to_runnable,
)
from langchain_core.runnables.utils import Input, Output
# Backwards compatibility.
RunnableMap = RunnableParallel
__all__ = [
"Input",
"Other",
"Output",
"Runnable",
"RunnableBinding",
"RunnableBindingBase",
"RunnableEach",
"RunnableEachBase",
"RunnableGenerator",
"RunnableLambda",
"RunnableLike",
"RunnableMap",
"RunnableParallel",
"RunnableSequence",
"RunnableSerializable",
"coerce_to_runnable",
]
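# Illustrative sketch, not part of the original module: `RunnableMap` is a
# plain alias of `RunnableParallel`, so old imports keep working while new
# code can name the class directly (assumes `langchain-core` is installed).
if __name__ == "__main__":
    fan_out = RunnableMap(
        double=RunnableLambda(lambda x: x * 2),
        square=RunnableLambda(lambda x: x * x),
    )
    print(fan_out.invoke(3))  # {'double': 6, 'square': 9}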
|
from langchain_core.runnables.base import (
Other,
Runnable,
RunnableBinding,
RunnableBindingBase,
RunnableEach,
RunnableEachBase,
RunnableGenerator,
RunnableLambda,
RunnableLike,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
coerce_to_runnable,
)
from langchain_core.runnables.utils import Input, Output
# Backwards compatibility.
RunnableMap = RunnableParallel
__all__ = [
"Input",
"Output",
"RunnableLike",
"Other",
"Runnable",
"RunnableSerializable",
"RunnableSequence",
"RunnableParallel",
"RunnableGenerator",
"RunnableLambda",
"RunnableEachBase",
"RunnableEach",
"RunnableBindingBase",
"RunnableBinding",
"RunnableMap",
"coerce_to_runnable",
]
|
from typing import Union, Iterable, Dict
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DocumentArrays are considered the same if they have the same client metadata and config
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
        except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _document_to_elastic(self, doc: 'Document') -> Dict:
return {
"_op_type": "index",
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
}
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
self._send_requests(batch)
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
self._send_requests(batch)
self._refresh(self._config.index_name)
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
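if __name__ == '__main__':
    # Illustrative sketch, not part of the original mixin, of the
    # flush-on-threshold batching used by `_upload_batch` above, with plain
    # ints standing in for bulk actions (the mixin itself needs an
    # Elasticsearch client and package context, so only the control flow is
    # mirrored here).
    def batches(items, batch_size):
        batch = []
        for item in items:
            batch.append(item)
            if len(batch) > batch_size:  # note: flushes at batch_size + 1
                yield batch
                batch = []
        if batch:
            yield batch

    print(list(batches(range(7), 3)))  # [[0, 1, 2, 3], [4, 5, 6]]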
|
from typing import Union, Iterable
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DocumentArrays are considered the same if they have the same client metadata and config
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
        except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def extend(self, values: Iterable['Document']) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
request = []
for value in values:
request.append(
{
"_op_type": "index",
'_id': value.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(value.embedding),
'blob': value.to_base64(),
}
)
self._offset2ids.append(value.id)
if len(request) > 0:
self._send_requests(request)
self._refresh(self._config.index_name)
|
import threading
from typing import Callable, ParamSpec, TypeVar
P = ParamSpec("P")
R = TypeVar("R")
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
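if __name__ == "__main__":
    # Illustrative usage, not part of the original module: each thread keeps
    # its own cache, so results are memoized without cross-thread locking.
    import time

    @thread_cached
    def slow_square(x: int) -> int:
        time.sleep(0.1)
        return x * x

    start = time.perf_counter()
    slow_square(4)  # computed once (~0.1 s)
    slow_square(4)  # served from this thread's cache
    print(f"two calls took {time.perf_counter() - start:.2f}s")  # ~0.10s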
|
from typing import Callable, TypeVar, ParamSpec
import threading
P = ParamSpec("P")
R = TypeVar("R")
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"WhyLabsCallbackHandler": "langchain_community.callbacks.whylabs_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"WhyLabsCallbackHandler",
]
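if __name__ == "__main__":
    # Illustrative sketch, not part of the original shim: module-level
    # __getattr__ (PEP 562) is consulted for attribute access on the module
    # object, so the deprecated name resolves lazily (assumes
    # `langchain-community` is installed for the target import to resolve).
    import sys

    handler_cls = getattr(sys.modules[__name__], "WhyLabsCallbackHandler")
    print(handler_cls)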
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"WhyLabsCallbackHandler": "langchain_community.callbacks.whylabs_callback"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"WhyLabsCallbackHandler",
]
|
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
# Globally silence PyTorch sparse CSR tensor beta warning
warnings.filterwarnings("ignore", message="Sparse CSR tensor support is in beta state")
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img', 'proposals']),
])
]
# TODO: support loading proposals
data = dict(
train=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
train=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, Iterable, Any
from jina import Executor, DocumentArray, requests
import torch
from .audio_clip.model import AudioCLIP
from .audio_clip.utils.transforms import ToTensor1D
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
"""
def __init__(self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ['r'],
*args, **kwargs):
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path)
self.aclp.eval()
self.aclp.audio.eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get('traversal_paths', self.default_traversal_paths)
        # traverse through all the documents that need to be processed
flat_docs = docs.traverse_flat(traversal_paths)
        # filter out documents without blobs (i.e. without audio content)
filtered_docs = DocumentArray([doc for doc in flat_docs if doc.blob is not None])
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, Iterable, Any
from jina import Executor, DocumentArray, requests
import torch
from model import AudioCLIP
from utils.transforms import ToTensor1D
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
"""
def __init__(self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ['r'],
*args, **kwargs):
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path)
self.aclp.eval()
self.aclp.audio.eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get('traversal_paths', self.default_traversal_paths)
        # traverse through all the documents that need to be processed
flat_docs = docs.traverse_flat(traversal_paths)
        # filter out documents without blobs (i.e. without audio content)
filtered_docs = DocumentArray([doc for doc in flat_docs if doc.blob is not None])
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
user_unsub_link: str | None = None,
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
base_url = (
settings.config.frontend_base_url or settings.config.platform_base_url
)
# Handle the case when data is a list
template_data = data
if isinstance(data, list):
# Create a dictionary with a 'notifications' key containing the list
template_data = {"notifications": data}
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=template_data,
unsubscribe_link=f"{base_url}/profile/settings",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(
user_email=user_email,
user_unsubscribe_link=user_unsub_link,
subject=subject,
body=full_message,
)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
        # resolve the template file under templates/: the .template property
        # already includes the ".html" part of the name (mirroring
        # base.html.jinja2), and the ".jinja2" suffix is appended here
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(
self,
user_email: str,
subject: str,
body: str,
user_unsubscribe_link: str | None = None,
):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
# Headers default to None internally so this is fine
Headers=(
{
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
"List-Unsubscribe": f"<{user_unsubscribe_link}>",
}
if user_unsubscribe_link
else None
),
)
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
user_unsub_link: str | None = None,
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
base_url = (
settings.config.frontend_base_url or settings.config.platform_base_url
)
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=data,
unsubscribe_link=f"{base_url}/profile/settings",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(
user_email=user_email,
user_unsubscribe_link=user_unsub_link,
subject=subject,
body=full_message,
)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
        # resolve the template file under templates/: the .template property
        # already includes the ".html" part of the name (mirroring
        # base.html.jinja2), and the ".jinja2" suffix is appended here
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(
self,
user_email: str,
subject: str,
body: str,
user_unsubscribe_link: str | None = None,
):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
# Headers default to None internally so this is fine
Headers=(
{
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
"List-Unsubscribe": f"<{user_unsubscribe_link}>",
}
if user_unsubscribe_link
else None
),
)
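# Illustrative call shape, not part of the original module (a sketch only: it
# needs Postmark credentials, backend settings, and on-disk templates, and the
# NotificationType member below is hypothetical):
#
#   sender = EmailSender()
#   sender.send_templated(
#       notification=NotificationType.WEEKLY_SUMMARY,  # hypothetical member
#       user_email="user@example.com",
#       data=event_model_or_list,  # one event model or a list of them
#       user_unsub_link="https://example.com/unsubscribe?token=...",
#   )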
|
from __future__ import annotations
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: list[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
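if __name__ == "__main__":
    # Illustrative run, not part of the original module (assumes the nltk
    # tokenizer data needed by word_tokenize is available).
    np.random.seed(0)  # make the deletion noise reproducible
    dataset = DenoisingAutoEncoderDataset(["the quick brown fox jumps"])
    example = dataset[0]
    print(example.texts)  # [noised sentence, original sentence]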
|
from typing import List
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, ForwardRef, Optional, Union
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
from docarray.typing.tensor.abstract_tensor import AbstractTensor
return isinstance(type_, type) and safe_issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or safe_issubclass(t, type(None)))
for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
    :return: A boolean value - 'True' if 'x' is a subclass of 'a_tuple', 'False' otherwise.
    Note that if the origin of 'x' is a list, tuple, dict, set or Union, the function immediately returns 'False'.
"""
    if (
        (get_origin(x) in (list, tuple, dict, set, Union))
        or is_typevar(x)
        or (type(x) is ForwardRef)
    ):
return False
return issubclass(x, a_tuple)
|
from typing import Any, ForwardRef, Optional, Union
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
from docarray.typing.tensor.abstract_tensor import AbstractTensor
return isinstance(type_, type) and safe_issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or safe_issubclass(t, type(None)))
for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
    :return: A boolean value - 'True' if 'x' is a subclass of 'a_tuple', 'False' otherwise.
    Note that if the origin of 'x' is a list, tuple, dict, set or Union, the function immediately returns 'False'.
"""
    if (
        (get_origin(x) in (list, tuple, dict, set, Union))
        or is_typevar(x)
        or (type(x) is ForwardRef)
    ):
return False
return issubclass(x, a_tuple)
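if __name__ == "__main__":
    # Illustrative check, not part of the original module (assumes
    # `typing_inspect` is installed): plain issubclass raises TypeError on
    # subscripted generics, while safe_issubclass degrades to False instead.
    from typing import List

    print(safe_issubclass(List[int], object))  # False (origin is list)
    print(safe_issubclass(bool, int))          # True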
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(
model,
args.img,
result,
palette=args.palette,
score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(
model,
args.img,
result[0],
palette=args.palette,
score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
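# Example invocation (script name, paths and files are placeholders):
#   python image_demo.py demo.jpg \
#       configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       checkpoints/faster_rcnn_r50_fpn_1x_coco.pth \
#       --palette coco --score-thr 0.5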
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(model, args.img, result[0], score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
#!/usr/bin/env python
import distutils.command.clean
import os
import re
import shutil
import subprocess
from pathlib import Path
import torch
from setuptools import find_packages, setup
from tools import setup_helpers
ROOT_DIR = Path(__file__).parent.resolve()
def _run_cmd(cmd):
try:
return subprocess.check_output(cmd, cwd=ROOT_DIR, stderr=subprocess.DEVNULL).decode("ascii").strip()
except Exception:
return None
def _get_version(sha):
with open(ROOT_DIR / "version.txt", "r") as f:
version = f.read().strip()
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha is not None:
version += "+" + sha[:7]
return version
def _make_version_file(version, sha):
sha = "Unknown" if sha is None else sha
version_path = ROOT_DIR / "src" / "torchaudio" / "version.py"
with open(version_path, "w") as f:
f.write(f"__version__ = '{version}'\n")
f.write(f"git_version = '{sha}'\n")
def _get_pytorch_version():
if "PYTORCH_VERSION" in os.environ:
return f"torch=={os.environ['PYTORCH_VERSION']}"
return "torch"
class clean(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove torchaudio extension
for path in (ROOT_DIR / "src").glob("**/*.so"):
print(f"removing '{path}'")
path.unlink()
# Remove build directory
build_dirs = [
ROOT_DIR / "build",
]
for path in build_dirs:
if path.exists():
print(f"removing '{path}' (and everything under it)")
shutil.rmtree(str(path), ignore_errors=True)
def _parse_url(path):
with open(path, "r") as file_:
for line in file_:
match = re.match(r"^\s*URL\s+(https:\/\/.+)$", line)
if match:
url = match.group(1)
yield url
def _fetch_archives(src):
for dest, url in src:
if not dest.exists():
print(f" --- Fetching {os.path.basename(dest)}")
torch.hub.download_url_to_file(url, dest, progress=False)
def _main():
sha = _run_cmd(["git", "rev-parse", "HEAD"])
branch = _run_cmd(["git", "rev-parse", "--abbrev-ref", "HEAD"])
tag = _run_cmd(["git", "describe", "--tags", "--exact-match", "@"])
print("-- Git branch:", branch)
print("-- Git SHA:", sha)
print("-- Git tag:", tag)
pytorch_package_dep = _get_pytorch_version()
print("-- PyTorch dependency:", pytorch_package_dep)
version = _get_version(sha)
print("-- Building version", version)
_make_version_file(version, sha)
with open("README.md") as f:
long_description = f.read()
setup(
name="torchaudio",
version=version,
description="An audio package for PyTorch",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pytorch/audio",
author=(
"Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, "
"Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang"
),
author_email="[email protected]",
maintainer="Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang",
maintainer_email="[email protected]",
classifiers=[
"Environment :: Plugins",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: C++",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Multimedia :: Sound/Audio",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
packages=find_packages(where="src"),
package_dir={"": "src"},
ext_modules=setup_helpers.get_ext_modules(),
cmdclass={
"build_ext": setup_helpers.CMakeBuild,
"clean": clean,
},
install_requires=[pytorch_package_dep],
zip_safe=False,
)
if __name__ == "__main__":
_main()
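# Example build command (version strings are placeholders): the environment
# variables handled above let CI pin both the package version and the torch
# dependency before building:
#   BUILD_VERSION=2.1.0 PYTORCH_VERSION=2.1.0 python setup.py bdist_wheel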
|
#!/usr/bin/env python
import distutils.command.clean
import os
import re
import shutil
import subprocess
from pathlib import Path
import torch
from setuptools import find_packages, setup
from tools import setup_helpers
ROOT_DIR = Path(__file__).parent.resolve()
def _run_cmd(cmd):
try:
return subprocess.check_output(cmd, cwd=ROOT_DIR, stderr=subprocess.DEVNULL).decode("ascii").strip()
except Exception:
return None
def _get_version(sha):
with open(ROOT_DIR / "version.txt", "r") as f:
version = f.read().strip()
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha is not None:
version += "+" + sha[:7]
return version
def _make_version_file(version, sha):
sha = "Unknown" if sha is None else sha
version_path = ROOT_DIR / "src" / "torchaudio" / "version.py"
with open(version_path, "w") as f:
f.write(f"__version__ = '{version}'\n")
f.write(f"git_version = '{sha}'\n")
def _get_pytorch_version():
if "PYTORCH_VERSION" in os.environ:
return f"torch=={os.environ['PYTORCH_VERSION']}"
return "torch"
class clean(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove torchaudio extension
for path in (ROOT_DIR / "src").glob("**/*.so"):
print(f"removing '{path}'")
path.unlink()
# Remove build directory
build_dirs = [
ROOT_DIR / "build",
]
for path in build_dirs:
if path.exists():
print(f"removing '{path}' (and everything under it)")
shutil.rmtree(str(path), ignore_errors=True)
def _parse_url(path):
with open(path, "r") as file_:
for line in file_:
match = re.match(r"^\s*URL\s+(https:\/\/.+)$", line)
if match:
url = match.group(1)
yield url
def _fetch_archives(src):
for dest, url in src:
if not dest.exists():
print(f" --- Fetching {os.path.basename(dest)}")
torch.hub.download_url_to_file(url, dest, progress=False)
def _main():
sha = _run_cmd(["git", "rev-parse", "HEAD"])
branch = _run_cmd(["git", "rev-parse", "--abbrev-ref", "HEAD"])
tag = _run_cmd(["git", "describe", "--tags", "--exact-match", "@"])
print("-- Git branch:", branch)
print("-- Git SHA:", sha)
print("-- Git tag:", tag)
pytorch_package_dep = _get_pytorch_version()
print("-- PyTorch dependency:", pytorch_package_dep)
version = _get_version(sha)
print("-- Building version", version)
_make_version_file(version, sha)
with open("README.md") as f:
long_description = f.read()
setup(
name="torchaudio",
version=version,
description="An audio package for PyTorch",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pytorch/audio",
author=(
"Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, "
"Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang"
),
author_email="[email protected]",
maintainer="Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang",
maintainer_email="[email protected]",
classifiers=[
"Environment :: Plugins",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: C++",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Multimedia :: Sound/Audio",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
packages=find_packages(where="src"),
package_dir={"": "src"},
ext_modules=setup_helpers.get_ext_modules(),
cmdclass={
"build_ext": setup_helpers.CMakeBuild,
"clean": clean,
},
install_requires=[pytorch_package_dep],
zip_safe=False,
)
if __name__ == "__main__":
_main()
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from ..base import BaseEstimator, _fit_context
from ..utils._param_validation import Interval
from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
from ..utils.validation import check_is_fitted
from ._base import SelectorMixin
class VarianceThreshold(SelectorMixin, BaseEstimator):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, default=0
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SelectFromModel: Meta-transformer for selecting features based on
importance weights.
SelectPercentile : Select features according to a percentile of the highest
scores.
SequentialFeatureSelector : Transformer that performs Sequential Feature
Selection.
Notes
-----
Allows NaN in the input.
Raises ValueError if no feature in X meets the variance threshold.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> from sklearn.feature_selection import VarianceThreshold
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
_parameter_constraints: dict = {
"threshold": [Interval(Real, 0, None, closed="left")]
}
def __init__(self, threshold=0.0):
self.threshold = threshold
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data from which to compute variances, where `n_samples` is
the number of samples and `n_features` is the number of features.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
dtype=np.float64,
ensure_all_finite="allow-nan",
)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
if self.threshold == 0:
mins, maxes = min_max_axis(X, axis=0)
peak_to_peaks = maxes - mins
else:
self.variances_ = np.nanvar(X, axis=0)
if self.threshold == 0:
peak_to_peaks = np.ptp(X, axis=0)
if self.threshold == 0:
# Use peak-to-peak to avoid numeric precision issues
# for constant features
compare_arr = np.array([self.variances_, peak_to_peaks])
self.variances_ = np.nanmin(compare_arr, axis=0)
if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self)
return self.variances_ > self.threshold
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = True
return tags
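# A minimal usage sketch (illustrative values, not part of this module):
# with a nonzero threshold, features whose training-set variance is at or
# below the threshold are removed by transform.
#
#   import numpy as np
#   from sklearn.feature_selection import VarianceThreshold
#
#   X = np.array([[0.0, 2.0, 0.1], [0.0, 1.0, 0.1], [0.0, 1.0, 0.2]])
#   selector = VarianceThreshold(threshold=0.01)
#   X_reduced = selector.fit_transform(X)  # keeps only the second column
#   selector.variances_                    # per-feature variances from fit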
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from ..base import BaseEstimator, _fit_context
from ..utils._param_validation import Interval
from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
from ..utils.validation import check_is_fitted
from ._base import SelectorMixin
class VarianceThreshold(SelectorMixin, BaseEstimator):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, default=0
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SelectFromModel: Meta-transformer for selecting features based on
importance weights.
SelectPercentile : Select features according to a percentile of the highest
scores.
SequentialFeatureSelector : Transformer that performs Sequential Feature
Selection.
Notes
-----
Allows NaN in the input.
Raises ValueError if no feature in X meets the variance threshold.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> from sklearn.feature_selection import VarianceThreshold
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
_parameter_constraints: dict = {
"threshold": [Interval(Real, 0, None, closed="left")]
}
def __init__(self, threshold=0.0):
self.threshold = threshold
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data from which to compute variances, where `n_samples` is
the number of samples and `n_features` is the number of features.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
dtype=np.float64,
ensure_all_finite="allow-nan",
)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
if self.threshold == 0:
mins, maxes = min_max_axis(X, axis=0)
peak_to_peaks = maxes - mins
else:
self.variances_ = np.nanvar(X, axis=0)
if self.threshold == 0:
peak_to_peaks = np.ptp(X, axis=0)
if self.threshold == 0:
# Use peak-to-peak to avoid numeric precision issues
# for constant features
compare_arr = np.array([self.variances_, peak_to_peaks])
self.variances_ = np.nanmin(compare_arr, axis=0)
if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self)
return self.variances_ > self.threshold
def _more_tags(self):
return {"allow_nan": True}
|
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. The agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
""" # noqa: E501
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.tools.base import (
FILTERED_ARGS,
ArgsSchema,
BaseTool,
BaseToolkit,
InjectedToolArg,
InjectedToolCallId,
SchemaAnnotationError,
ToolException,
_get_runnable_config_param,
create_schema_from_function,
)
from langchain_core.tools.convert import (
convert_runnable_to_tool,
tool,
)
from langchain_core.tools.render import (
ToolsRenderer,
render_text_description,
render_text_description_and_args,
)
from langchain_core.tools.retriever import (
RetrieverInput,
create_retriever_tool,
)
from langchain_core.tools.simple import Tool
from langchain_core.tools.structured import StructuredTool
__all__ = (
"FILTERED_ARGS",
"ArgsSchema",
"BaseTool",
"BaseToolkit",
"InjectedToolArg",
"InjectedToolCallId",
"RetrieverInput",
"SchemaAnnotationError",
"StructuredTool",
"Tool",
"ToolException",
"ToolsRenderer",
"_get_runnable_config_param",
"convert_runnable_to_tool",
"create_retriever_tool",
"create_schema_from_function",
"render_text_description",
"render_text_description_and_args",
"tool",
)
_dynamic_imports = {
"FILTERED_ARGS": "base",
"ArgsSchema": "base",
"BaseTool": "base",
"BaseToolkit": "base",
"InjectedToolArg": "base",
"InjectedToolCallId": "base",
"SchemaAnnotationError": "base",
"ToolException": "base",
"_get_runnable_config_param": "base",
"create_schema_from_function": "base",
"convert_runnable_to_tool": "convert",
"tool": "convert",
"ToolsRenderer": "render",
"render_text_description": "render",
"render_text_description_and_args": "render",
"RetrieverInput": "retriever",
"create_retriever_tool": "retriever",
"Tool": "simple",
"StructuredTool": "structured",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
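# A small sketch of the lazy-import pattern above (illustrative, not part of
# the module): attribute access falls through to __getattr__, which imports
# the symbol from its submodule via import_attr and caches it in globals(),
# so subsequent lookups bypass __getattr__ entirely.
#
#   from langchain_core import tools
#   tools.BaseTool  # first access: imported from langchain_core.tools.base
#   tools.BaseTool  # later access: served directly from module globals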
|
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. The agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
""" # noqa: E501
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.tools.base import (
FILTERED_ARGS,
ArgsSchema,
BaseTool,
BaseToolkit,
InjectedToolArg,
InjectedToolCallId,
SchemaAnnotationError,
ToolException,
_get_runnable_config_param,
create_schema_from_function,
)
from langchain_core.tools.convert import (
convert_runnable_to_tool,
tool,
)
from langchain_core.tools.render import (
ToolsRenderer,
render_text_description,
render_text_description_and_args,
)
from langchain_core.tools.retriever import (
RetrieverInput,
create_retriever_tool,
)
from langchain_core.tools.simple import Tool
from langchain_core.tools.structured import StructuredTool
__all__ = (
"ArgsSchema",
"BaseTool",
"BaseToolkit",
"FILTERED_ARGS",
"SchemaAnnotationError",
"ToolException",
"InjectedToolArg",
"InjectedToolCallId",
"_get_runnable_config_param",
"create_schema_from_function",
"convert_runnable_to_tool",
"tool",
"ToolsRenderer",
"render_text_description",
"render_text_description_and_args",
"RetrieverInput",
"create_retriever_tool",
"Tool",
"StructuredTool",
)
_dynamic_imports = {
"FILTERED_ARGS": "base",
"ArgsSchema": "base",
"BaseTool": "base",
"BaseToolkit": "base",
"InjectedToolArg": "base",
"InjectedToolCallId": "base",
"SchemaAnnotationError": "base",
"ToolException": "base",
"_get_runnable_config_param": "base",
"create_schema_from_function": "base",
"convert_runnable_to_tool": "convert",
"tool": "convert",
"ToolsRenderer": "render",
"render_text_description": "render",
"render_text_description_and_args": "render",
"RetrieverInput": "retriever",
"create_retriever_tool": "retriever",
"Tool": "simple",
"StructuredTool": "structured",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
IS_PYDANTIC_V2,
PydanticBaseModel,
TBaseModel,
)
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
pydantic_object: Annotated[type[TBaseModel], SkipValidation()] # type: ignore
"""The pydantic model to parse."""
def _parse_obj(self, obj: dict) -> TBaseModel:
if IS_PYDANTIC_V2:
try:
if issubclass(self.pydantic_object, pydantic.BaseModel):
return self.pydantic_object.model_validate(obj)
if issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
else: # pydantic v1
try:
return self.pydantic_object.parse_obj(obj)
except pydantic.ValidationError as e:
raise self._parser_exception(e, obj) from e
def _parser_exception(
self, e: Exception, json_object: dict
) -> OutputParserException:
json_string = json.dumps(json_object)
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_string}. Got: {e}"
return OutputParserException(msg, llm_output=json_string)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Optional[TBaseModel]:
"""Parse the result of an LLM call to a pydantic object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to False.
Returns:
The parsed pydantic object.
"""
try:
json_object = super().parse_result(result)
return self._parse_obj(json_object)
except OutputParserException:
if partial:
return None
raise
def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
Args:
text: The output of the LLM call.
Returns:
The parsed pydantic object.
"""
return super().parse(text)
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.model_json_schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
return self.pydantic_object
PydanticOutputParser.model_rebuild()
_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
# Re-exporting types for backwards compatibility
__all__ = [
"PydanticBaseModel",
"PydanticOutputParser",
"TBaseModel",
]
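# A minimal usage sketch, assuming a toy model (Joke is hypothetical and
# defined here only for illustration):
#
#   from pydantic import BaseModel
#
#   class Joke(BaseModel):
#       setup: str
#       punchline: str
#
#   parser = PydanticOutputParser(pydantic_object=Joke)
#   parser.get_format_instructions()  # JSON-schema prompt snippet for the LLM
#   joke = parser.parse('{"setup": "Why?", "punchline": "Because."}')
#   assert isinstance(joke, Joke)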
|
"""Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
PYDANTIC_MAJOR_VERSION,
PydanticBaseModel,
TBaseModel,
)
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
pydantic_object: Annotated[type[TBaseModel], SkipValidation()] # type: ignore
"""The pydantic model to parse."""
def _parse_obj(self, obj: dict) -> TBaseModel:
if PYDANTIC_MAJOR_VERSION == 2:
try:
if issubclass(self.pydantic_object, pydantic.BaseModel):
return self.pydantic_object.model_validate(obj)
if issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
else: # pydantic v1
try:
return self.pydantic_object.parse_obj(obj)
except pydantic.ValidationError as e:
raise self._parser_exception(e, obj) from e
def _parser_exception(
self, e: Exception, json_object: dict
) -> OutputParserException:
json_string = json.dumps(json_object)
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_string}. Got: {e}"
return OutputParserException(msg, llm_output=json_string)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Optional[TBaseModel]:
"""Parse the result of an LLM call to a pydantic object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to False.
Returns:
The parsed pydantic object.
"""
try:
json_object = super().parse_result(result)
return self._parse_obj(json_object)
except OutputParserException:
if partial:
return None
raise
def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
Args:
text: The output of the LLM call.
Returns:
The parsed pydantic object.
"""
return super().parse(text)
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.model_json_schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
return self.pydantic_object
PydanticOutputParser.model_rebuild()
_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
# Re-exporting types for backwards compatibility
__all__ = [
"PydanticBaseModel",
"PydanticOutputParser",
"TBaseModel",
]
|
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomMixup, SimpleCopyPaste
from ._geometry import FixedSizeCrop
from ._misc import PermuteDimensions, TransposeDimensions
from ._type_conversion import LabelToOneHot
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
SanitizeBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core.bbox import distance2bbox
from mmdet.core.mask.structures import BitmapMasks, PolygonMasks
from mmdet.core.utils import mask2ndarray
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
Returns:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def test_mask2ndarray():
raw_masks = np.ones((3, 28, 28))
bitmap_mask = BitmapMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(bitmap_mask)
assert np.allclose(raw_masks, output_mask)
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(polygon_masks)
assert output_mask.shape == (3, 28, 28)
raw_masks = np.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = torch.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
# test unsupported type
raw_masks = []
with pytest.raises(TypeError):
output_mask = mask2ndarray(raw_masks)
def test_distance2bbox():
point = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
expected_decode_bboxes = torch.Tensor([[74., 61., 75., 62.],
[0., 104., 0., 112.],
[100., 90., 100., 120.],
[0., 120., 100., 120.]])
out_bbox = distance2bbox(point, distance, max_shape=(120, 100))
assert expected_decode_bboxes.allclose(out_bbox)
out = distance2bbox(point, distance, max_shape=torch.Tensor((120, 100)))
assert expected_decode_bboxes.allclose(out)
batch_point = point.unsqueeze(0).repeat(2, 1, 1)
batch_distance = distance.unsqueeze(0).repeat(2, 1, 1)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=[(120, 100), (120, 100)])[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
distance2bbox(
batch_point,
batch_distance,
max_shape=[(120, 100), (120, 100), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
rois = torch.zeros((2, 0, 4))
deltas = torch.zeros((2, 0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
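# A worked example of the decoding exercised above, using the test's own
# data: distance2bbox takes points (x, y) and per-side distances
# (left, top, right, bottom), decodes x1 = x - l, y1 = y - t, x2 = x + r,
# y2 = y + b, and clamps to max_shape (H, W):
#
#   point (74, 61),   distance (0, 0, 1, 1)  -> (74, 61, 75, 62)   # no clamping
#   point (-29, 106), distance (1, 2, 10, 6) -> (0, 104, 0, 112)   # x clamped to [0, 100]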
|
import numpy as np
import pytest
import torch
from mmdet.core.bbox import distance2bbox
from mmdet.core.mask.structures import BitmapMasks, PolygonMasks
from mmdet.core.utils import mask2ndarray
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
Returns:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def test_mask2ndarray():
raw_masks = np.ones((3, 28, 28))
bitmap_mask = BitmapMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(bitmap_mask)
assert np.allclose(raw_masks, output_mask)
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(polygon_masks)
assert output_mask.shape == (3, 28, 28)
raw_masks = np.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = torch.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
# test unsupported type
raw_masks = []
with pytest.raises(TypeError):
output_mask = mask2ndarray(raw_masks)
def test_distance2bbox():
point = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
expected_decode_bboxes = torch.Tensor([[74., 61., 75., 62.],
[0., 104., 0., 112.],
[100., 90., 100., 120.],
[0., 120., 100., 120.]])
out_bbox = distance2bbox(point, distance, max_shape=(120, 100))
assert expected_decode_bboxes.allclose(out_bbox)
out = distance2bbox(point, distance, max_shape=torch.Tensor((120, 100)))
assert expected_decode_bboxes.allclose(out)
batch_point = point.unsqueeze(0).repeat(2, 1, 1)
batch_distance = distance.unsqueeze(0).repeat(2, 1, 1)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=[(120, 100), (120, 100)])[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
distance2bbox(
batch_point,
batch_distance,
max_shape=[(120, 100), (120, 100), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
rois = torch.zeros((2, 0, 4))
deltas = torch.zeros((2, 0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C, C Sharp, C++, Go, Java, JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python, PyTorch, R, React, Rust, Scala, scikit-learn, SciPy, Swift, TensorFlow, Vue.js
This example consists of:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import os
from sentence_transformers import InputExample, SentenceTransformer, datasets, losses, models
train_examples = []
with open("generated_queries.tsv") as fIn:
for line in fIn:
query, paragraph = line.strip().split("\t", maxsplit=1)
train_examples.append(InputExample(texts=[query, paragraph]))
# For the MultipleNegativesRankingLoss, it is important
# that the batch does not contain duplicate entries, i.e.
# no two equal queries and no two equal paragraphs.
# To ensure this, we use a special data loader
train_dataloader = datasets.NoDuplicatesDataLoader(train_examples, batch_size=64)
# Now we create a SentenceTransformer model from scratch
word_emb = models.Transformer("distilbert-base-uncased")
pooling = models.Pooling(word_emb.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_emb, pooling])
# MultipleNegativesRankingLoss requires input pairs (query, relevant_passage)
# and trains the model so that it is suitable for semantic search
train_loss = losses.MultipleNegativesRankingLoss(model)
# Tune the model
num_epochs = 3
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
show_progress_bar=True,
)
os.makedirs("output", exist_ok=True)
model.save("output/programming-model")
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C, C Sharp, C++, Go, Java, JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python, PyTorch, R, React, Rust, Scala, scikit-learn, SciPy, Swift, TensorFlow, Vue.js
This example consists of:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
from sentence_transformers import SentenceTransformer, InputExample, losses, models, datasets
import os
train_examples = []
with open("generated_queries.tsv") as fIn:
for line in fIn:
query, paragraph = line.strip().split("\t", maxsplit=1)
train_examples.append(InputExample(texts=[query, paragraph]))
# For the MultipleNegativesRankingLoss, it is important
# that the batch does not contain duplicate entries, i.e.
# no two equal queries and no two equal paragraphs.
# To ensure this, we use a special data loader
train_dataloader = datasets.NoDuplicatesDataLoader(train_examples, batch_size=64)
# Now we create a SentenceTransformer model from scratch
word_emb = models.Transformer("distilbert-base-uncased")
pooling = models.Pooling(word_emb.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_emb, pooling])
# MultipleNegativesRankingLoss requires input pairs (query, relevant_passage)
# and trains the model so that it is suitable for semantic search
train_loss = losses.MultipleNegativesRankingLoss(model)
# Tune the model
num_epochs = 3
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
show_progress_bar=True,
)
os.makedirs("output", exist_ok=True)
model.save("output/programming-model")
|
_base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
"""
Functions for building sdist
"""
import logging
import pathlib
from .util import copy_with_logging, copytree_with_logging
def copy_cpp_src_tree(
cpp_src_dir: pathlib.Path, target_dir: pathlib.Path, logger: logging.Logger
) -> None:
"""Copy C++ source tree into build directory"""
for subdir in [
"src",
"include",
"dmlc-core",
"gputreeshap",
"rabit",
"cmake",
"plugin",
]:
copytree_with_logging(cpp_src_dir / subdir, target_dir / subdir, logger=logger)
for filename in ["CMakeLists.txt", "LICENSE"]:
copy_with_logging(cpp_src_dir.joinpath(filename), target_dir, logger=logger)
|
"""
Functions for building sdist
"""
import logging
import pathlib
from .util import copy_with_logging, copytree_with_logging
def copy_cpp_src_tree(
cpp_src_dir: pathlib.Path, target_dir: pathlib.Path, logger: logging.Logger
) -> None:
"""Copy C++ source tree into build directory"""
for subdir in [
"src",
"include",
"dmlc-core",
"gputreeshap",
"rabit",
"cmake",
"plugin",
]:
copytree_with_logging(cpp_src_dir / subdir, target_dir / subdir, logger=logger)
for filename in ["CMakeLists.txt", "LICENSE"]:
copy_with_logging(cpp_src_dir.joinpath(filename), target_dir, logger=logger)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
from pathlib import Path
from typing import Dict
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from PIL import Image
from torchvision.models.mobilenetv2 import model_urls
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def mobilenet_weights(tmpdir: str) -> str:
weights_file = os.path.join(tmpdir, 'w.pth')
torch.hub.download_url_to_file(
url=model_urls['mobilenet_v2'], dst=weights_file, progress=False
)
return weights_file
@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
return DocumentArray(
[Document(blob=np.ones((10, 10, 3), dtype=np.uint8)) for _ in range(11)]
)
@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(chunks=[Document(blob=np.ones((10, 10, 3), dtype=np.uint8))])
for _ in range(11)
]
)
@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(
chunks=[
Document(blob=np.ones((10, 10, 3), dtype=np.uint8))
for _ in range(11)
]
)
]
)
]
)
@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
def get_path(file_name_no_suffix: str) -> str:
return os.path.join(test_dir, 'test_data', file_name_no_suffix + '.png')
image_dict = {
file_name: np.array(Image.open(get_path(file_name)))[:, :, 0:3]
for file_name in ['airplane', 'banana1', 'banana2', 'satellite', 'studio']
}
return image_dict
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from typing import Dict
import pytest
import torch
import numpy as np
from torchvision.models.mobilenetv2 import model_urls
from PIL import Image
from jina import DocumentArray, Document
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def mobilenet_weights(tmpdir: str) -> str:
weights_file = os.path.join(tmpdir, 'w.pth')
torch.hub.download_url_to_file(
url=model_urls['mobilenet_v2'], dst=weights_file, progress=False
)
return weights_file
@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
return DocumentArray(
[Document(blob=np.ones((10, 10, 3), dtype=np.uint8)) for _ in range(11)]
)
@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(chunks=[Document(blob=np.ones((10, 10, 3), dtype=np.uint8))])
for _ in range(11)
]
)
@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(
chunks=[
Document(blob=np.ones((10, 10, 3), dtype=np.uint8))
for _ in range(11)
]
)
]
)
]
)
@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
def get_path(file_name_no_suffix: str) -> str:
return os.path.join(test_dir, 'test_data', file_name_no_suffix + '.png')
image_dict = {
file_name: np.array(Image.open(get_path(file_name)))[:, :, 0:3]
for file_name in ['airplane', 'banana1', 'banana2', 'satellite', 'studio']
}
return image_dict
|