input | output
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This
example showcases the core functionality of the new ``torchvision.transforms.v2`` API.
"""
import pathlib
import torch
import torchvision
def load_data():
from torchvision.io import read_image
from torchvision import datapoints
from torchvision.ops import masks_to_boxes
assets_directory = pathlib.Path("assets")
path = assets_directory / "FudanPed00054.png"
image = datapoints.Image(read_image(str(path)))
merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png"))
labels = torch.unique(merged_masks)[1:]
masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1))
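    # broadcasting note: comparing merged_masks of shape (1, H, W) against the
    # labels viewed as (N, 1, 1) yields one boolean (H, W) mask per label,
    # i.e. a (N, H, W) tensor with one instance mask per pedestrian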
bounding_boxes = datapoints.BoundingBoxes(
masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, canvas_size=image.shape[-2:]
)
return path, image, bounding_boxes, masks, labels
# %%
# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation
# masks. Thus, it offers native support for many computer vision tasks, like image and video classification, object
# detection, or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may change slightly in the future.
torchvision.disable_beta_transforms_warning()
import torchvision.transforms.v2 as transforms
transform = transforms.Compose(
[
transforms.ColorJitter(contrast=0.5),
transforms.RandomRotation(30),
transforms.CenterCrop(480),
]
)
# %%
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.
path, image, bounding_boxes, masks, labels = load_data()
torch.manual_seed(0)
new_image = transform(image) # Image Classification
new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection
new_image, new_bounding_boxes, new_masks, new_labels = transform(
image, bounding_boxes, masks, labels
) # Instance Segmentation
new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure
# %%
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_plot_datapoints.py`. Note, however, that as a
# regular user, you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`.
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing to store extra
# information directly with the sample:
sample = {"path": path, "image": image}
new_sample = transform(sample)
assert new_sample["path"] is sample["path"]
# %%
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as images or videos if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through.
plain_tensor_image = torch.rand(image.shape)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor together with an explicit image will not transform the former
plain_tensor_image, image = transform(plain_tensor_image, image)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor without an explicit image will transform the former
plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes)
print(image.shape, plain_tensor_image.shape)
|
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This
example showcases the core functionality of the new ``torchvision.transforms.v2`` API.
"""
import pathlib
import torch
import torchvision
def load_data():
from torchvision.io import read_image
from torchvision import datapoints
from torchvision.ops import masks_to_boxes
assets_directory = pathlib.Path("assets")
path = assets_directory / "FudanPed00054.png"
image = datapoints.Image(read_image(str(path)))
merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png"))
labels = torch.unique(merged_masks)[1:]
masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1))
bounding_boxes = datapoints.BoundingBoxes(
masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, canvas_size=image.shape[-2:]
)
return path, image, bounding_boxes, masks, labels
########################################################################################################################
# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation
# masks. Thus, it offers native support for many computer vision tasks, like image and video classification, object
# detection, or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may change slightly in the future.
torchvision.disable_beta_transforms_warning()
import torchvision.transforms.v2 as transforms
transform = transforms.Compose(
[
transforms.ColorJitter(contrast=0.5),
transforms.RandomRotation(30),
transforms.CenterCrop(480),
]
)
########################################################################################################################
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.
path, image, bounding_boxes, masks, labels = load_data()
torch.manual_seed(0)
new_image = transform(image) # Image Classification
new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection
new_image, new_bounding_boxes, new_masks, new_labels = transform(
image, bounding_boxes, masks, labels
) # Instance Segmentation
new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure
########################################################################################################################
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_plot_datapoints.py`. Note, however, that as a
# regular user, you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`.
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing to store extra
# information directly with the sample:
sample = {"path": path, "image": image}
new_sample = transform(sample)
assert new_sample["path"] is sample["path"]
########################################################################################################################
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as images or videos if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through.
plain_tensor_image = torch.rand(image.shape)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor together with an explicit image will not transform the former
plain_tensor_image, image = transform(plain_tensor_image, image)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor without an explicit image will transform the former
plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes)
print(image.shape, plain_tensor_image.shape)
|
import logging
import pathlib
from argparse import ArgumentParser
import torch
import torchaudio
from lightning import ConformerRNNTModule
from transforms import get_data_module
logger = logging.getLogger()
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
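# Quick illustration (hypothetical strings, not from LibriSpeech): the function
# above counts word-level edit operations, which run_eval accumulates into WER:
#
#   compute_word_level_distance("the cat sat", "the cat sits")  # -> 1
#   compute_word_level_distance("HELLO WORLD", "hello world")   # -> 0 (case-insensitive)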
def run_eval(args):
model = ConformerRNNTModule.load_from_checkpoint(args.checkpoint_path, sp_model_path=str(args.sp_model_path)).eval()
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
if args.use_cuda:
model = model.to(device="cuda")
total_edit_distance = 0
total_length = 0
dataloader = data_module.test_dataloader()
with torch.no_grad():
for idx, (batch, sample) in enumerate(dataloader):
actual = sample[0][2]
predicted = model(batch)
total_edit_distance += compute_word_level_distance(actual, predicted)
total_length += len(actual.split())
if idx % 100 == 0:
logger.warning(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.warning(f"Final WER: {total_edit_distance / total_length}")
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
required=True,
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--use-cuda",
action="store_true",
default=False,
help="Run using CUDA.",
)
args = parser.parse_args()
run_eval(args)
if __name__ == "__main__":
cli_main()
|
import logging
import pathlib
from argparse import ArgumentParser
import torch
import torchaudio
from lightning import ConformerRNNTModule, get_data_module
logger = logging.getLogger()
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def run_eval(args):
model = ConformerRNNTModule.load_from_checkpoint(args.checkpoint_path, sp_model_path=str(args.sp_model_path)).eval()
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
if args.use_cuda:
model = model.to(device="cuda")
total_edit_distance = 0
total_length = 0
dataloader = data_module.test_dataloader()
with torch.no_grad():
for idx, (batch, sample) in enumerate(dataloader):
actual = sample[0][2]
predicted = model(batch)
total_edit_distance += compute_word_level_distance(actual, predicted)
total_length += len(actual.split())
if idx % 100 == 0:
logger.warning(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.warning(f"Final WER: {total_edit_distance / total_length}")
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
required=True,
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--use-cuda",
action="store_true",
default=False,
help="Run using CUDA.",
)
args = parser.parse_args()
run_eval(args)
if __name__ == "__main__":
cli_main()
|
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_270k_coco_instance.py',
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN, which handles empty tensors in heads. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_270k_coco_instance.py',
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN, which handles empty tensors in heads. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.core import url_to_fs
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = url_to_fs(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
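# fsspec URL-chaining note: "zip://dataset.jsonl::archive.zip" means "open
# dataset.jsonl inside archive.zip"; url_to_fs resolves the chain (outermost
# protocol first) and returns the innermost filesystem plus the member path.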
def test_fs_overwrites():
protocol = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol, None, clobber=True)
with pytest.warns(UserWarning) as warning_info:
importlib.reload(datasets.filesystems)
assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.core import url_to_fs
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = url_to_fs(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
def test_fs_overwrites():
protocol = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol, None, clobber=True)
with pytest.warns(UserWarning) as warning_info:
importlib.reload(datasets.filesystems)
assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.language.cobol import (
CobolSegmenter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CobolSegmenter": "langchain_community.document_loaders.parsers.language.cobol",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CobolSegmenter",
]
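# Usage sketch (the import path below is assumed from this module's package
# layout and is illustrative): accessing ``CobolSegmenter`` on this module is
# intercepted by ``__getattr__`` above, which emits a deprecation warning and
# returns the implementation from ``langchain_community``:
#
#   from langchain.document_loaders.parsers.language.cobol import CobolSegmenter  # hypothetical path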
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.language.cobol import (
CobolSegmenter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CobolSegmenter": "langchain_community.document_loaders.parsers.language.cobol"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CobolSegmenter",
]
|
"""HTML node parser."""
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
if TYPE_CHECKING:
from bs4 import Tag, PageElement, NavigableString
DEFAULT_TAGS = ["p", "h1", "h2", "h3", "h4", "h5", "h6", "li", "b", "i", "u", "section"]
class HTMLNodeParser(NodeParser):
"""
HTML node parser.
Splits a document into Nodes using custom HTML splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
tags: List[str] = Field(
default=DEFAULT_TAGS, description="HTML tags to extract text from."
)
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
tags: Optional[List[str]] = DEFAULT_TAGS,
) -> "HTMLNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
tags=tags,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "HTMLNodeParser"
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
try:
from bs4 import BeautifulSoup, Tag
except ImportError:
raise ImportError("bs4 is required to read HTML files.")
text = node.get_content(metadata_mode=MetadataMode.NONE)
soup = BeautifulSoup(text, "html.parser")
html_nodes = []
last_tag = None
current_section = ""
tags = soup.find_all(self.tags)
for tag in tags:
tag_text = self._extract_text_from_tag(tag)
if isinstance(tag, Tag) and (tag.name == last_tag or last_tag is None):
last_tag = tag.name
current_section += f"{tag_text.strip()}\n"
else:
html_nodes.append(
self._build_node_from_split(
current_section.strip(), node, {"tag": last_tag}
)
)
if isinstance(tag, Tag):
last_tag = tag.name
current_section = f"{tag_text}\n"
if current_section:
html_nodes.append(
self._build_node_from_split(
current_section.strip(), node, {"tag": last_tag}
)
)
return html_nodes
def _extract_text_from_tag(
self, tag: Union["Tag", "NavigableString", "PageElement"]
) -> str:
from bs4 import NavigableString, Tag, PageElement
texts = []
if isinstance(tag, Tag):
for elem in tag.children:
if isinstance(elem, NavigableString):
if elem.strip():
texts.append(elem.strip())
elif isinstance(elem, Tag):
if elem.name in self.tags:
continue
else:
texts.append(elem.get_text().strip())
elif isinstance(elem, PageElement):
texts.append(elem.get_text().strip())
else:
texts.append(tag.get_text().strip())
return "\n".join(texts)
def _build_node_from_split(
self,
text_split: str,
node: BaseNode,
metadata: dict,
) -> TextNode:
"""Build node from single text split."""
node = build_nodes_from_splits([text_split], node, id_func=self.id_func)[0]
if self.include_metadata:
node.metadata = {**node.metadata, **metadata}
return node
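# Minimal usage sketch (assumes llama-index-core is installed; the HTML string
# is illustrative only):
#
#   from llama_index.core import Document
#
#   parser = HTMLNodeParser.from_defaults(tags=["h1", "p"])
#   nodes = parser.get_nodes_from_documents(
#       [Document(text="<h1>Title</h1><p>First paragraph.</p>")]
#   )
#   print([(n.metadata.get("tag"), n.text) for n in nodes])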
|
"""HTML node parser."""
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
if TYPE_CHECKING:
from bs4 import Tag, PageElement, NavigableString
DEFAULT_TAGS = ["p", "h1", "h2", "h3", "h4", "h5", "h6", "li", "b", "i", "u", "section"]
class HTMLNodeParser(NodeParser):
"""
HTML node parser.
Splits a document into Nodes using custom HTML splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
tags: List[str] = Field(
default=DEFAULT_TAGS, description="HTML tags to extract text from."
)
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
tags: Optional[List[str]] = DEFAULT_TAGS,
) -> "HTMLNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
tags=tags,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "HTMLNodeParser"
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
try:
from bs4 import BeautifulSoup, Tag
except ImportError:
raise ImportError("bs4 is required to read HTML files.")
text = node.get_content(metadata_mode=MetadataMode.NONE)
soup = BeautifulSoup(text, "html.parser")
html_nodes = []
last_tag = None
current_section = ""
tags = soup.find_all(self.tags)
for tag in tags:
tag_text = self._extract_text_from_tag(tag)
if isinstance(tag, Tag) and (tag.name == last_tag or last_tag is None):
last_tag = tag.name
current_section += f"{tag_text.strip()}\n"
else:
html_nodes.append(
self._build_node_from_split(
current_section.strip(), node, {"tag": last_tag}
)
)
if isinstance(tag, Tag):
last_tag = tag.name
current_section = f"{tag_text}\n"
if current_section:
html_nodes.append(
self._build_node_from_split(
current_section.strip(), node, {"tag": last_tag}
)
)
return html_nodes
def _extract_text_from_tag(
self, tag: Union["Tag", "NavigableString", "PageElement"]
) -> str:
from bs4 import NavigableString, Tag, PageElement
texts = []
if isinstance(tag, Tag):
for elem in tag.children:
if isinstance(elem, NavigableString):
if elem.strip():
texts.append(elem.strip())
elif isinstance(elem, Tag):
if elem.name in self.tags:
continue
else:
texts.append(elem.get_text().strip())
elif isinstance(elem, PageElement):
texts.append(elem.get_text().strip())
else:
texts.append(tag.get_text().strip())
return "\n".join(texts)
def _build_node_from_split(
self,
text_split: str,
node: BaseNode,
metadata: dict,
) -> TextNode:
"""Build node from single text split."""
node = build_nodes_from_splits([text_split], node, id_func=self.id_func)[0]
if self.include_metadata:
node.metadata = {**node.metadata, **metadata}
return node
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
bbox_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=2,
aligned=False)),
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign',
output_size=14,
sampling_ratio=2,
aligned=False))))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
bbox_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=2,
aligned=False)),
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign',
output_size=14,
sampling_ratio=2,
aligned=False))))
|
"""Langchain Embedding Wrapper Module."""
from typing import TYPE_CHECKING, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks import CallbackManager
if TYPE_CHECKING:
from llama_index.core.bridge.langchain import Embeddings as LCEmbeddings
class LangchainEmbedding(BaseEmbedding):
"""
External embeddings (taken from Langchain).
Args:
langchain_embedding (langchain.embeddings.Embeddings): Langchain
embeddings class.
"""
_langchain_embedding: "LCEmbeddings" = PrivateAttr()
_async_not_implemented_warned: bool = PrivateAttr(default=False)
def __init__(
self,
langchain_embeddings: "LCEmbeddings",
model_name: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
):
# attempt to get a useful model name
if model_name is not None:
model_name = model_name
elif hasattr(langchain_embeddings, "model_name"):
model_name = langchain_embeddings.model_name
elif hasattr(langchain_embeddings, "model"):
model_name = langchain_embeddings.model
else:
model_name = type(langchain_embeddings).__name__
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model_name=model_name,
)
self._langchain_embedding = langchain_embeddings
@classmethod
def class_name(cls) -> str:
return "LangchainEmbedding"
def _async_not_implemented_warn_once(self) -> None:
if not self._async_not_implemented_warned:
print("Async embedding not available, falling back to sync method.")
self._async_not_implemented_warned = True
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._langchain_embedding.embed_query(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
try:
return await self._langchain_embedding.aembed_query(query)
except NotImplementedError:
# Warn the user that sync is being used
self._async_not_implemented_warn_once()
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
try:
embeds = await self._langchain_embedding.aembed_documents([text])
return embeds[0]
except NotImplementedError:
# Warn the user that sync is being used
self._async_not_implemented_warn_once()
return self._get_text_embedding(text)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._langchain_embedding.embed_documents([text])[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._langchain_embedding.embed_documents(texts)
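# Minimal usage sketch (FakeEmbeddings is langchain's fake test embedding
# class, used here purely for illustration):
#
#   from langchain_community.embeddings import FakeEmbeddings
#
#   embed_model = LangchainEmbedding(FakeEmbeddings(size=8))
#   vector = embed_model.get_query_embedding("hello world")
#   assert len(vector) == 8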
|
"""Langchain Embedding Wrapper Module."""
from typing import TYPE_CHECKING, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks import CallbackManager
if TYPE_CHECKING:
from llama_index.core.bridge.langchain import Embeddings as LCEmbeddings
class LangchainEmbedding(BaseEmbedding):
"""External embeddings (taken from Langchain).
Args:
langchain_embedding (langchain.embeddings.Embeddings): Langchain
embeddings class.
"""
_langchain_embedding: "LCEmbeddings" = PrivateAttr()
_async_not_implemented_warned: bool = PrivateAttr(default=False)
def __init__(
self,
langchain_embeddings: "LCEmbeddings",
model_name: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
):
# attempt to get a useful model name
if model_name is not None:
model_name = model_name
elif hasattr(langchain_embeddings, "model_name"):
model_name = langchain_embeddings.model_name
elif hasattr(langchain_embeddings, "model"):
model_name = langchain_embeddings.model
else:
model_name = type(langchain_embeddings).__name__
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model_name=model_name,
)
self._langchain_embedding = langchain_embeddings
@classmethod
def class_name(cls) -> str:
return "LangchainEmbedding"
def _async_not_implemented_warn_once(self) -> None:
if not self._async_not_implemented_warned:
print("Async embedding not available, falling back to sync method.")
self._async_not_implemented_warned = True
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._langchain_embedding.embed_query(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
try:
return await self._langchain_embedding.aembed_query(query)
except NotImplementedError:
# Warn the user that sync is being used
self._async_not_implemented_warn_once()
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
try:
embeds = await self._langchain_embedding.aembed_documents([text])
return embeds[0]
except NotImplementedError:
# Warn the user that sync is being used
self._async_not_implemented_warn_once()
return self._get_text_embedding(text)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._langchain_embedding.embed_documents([text])[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._langchain_embedding.embed_documents(texts)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
        :param traversal_paths: default traversal path
        :param batch_size: default number of documents encoded per batch
        :param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
                'Please download the AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with a `blob` of size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that can override the default `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.no_grad():
for batch in docs.batch(batch_size, traversal_paths):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
        if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            # already at the target rate; return the blob unchanged so the
            # caller can still unpack (blob, sample_rate)
            return blob, AudioCLIPEncoder.TARGET_SAMPLE_RATE
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
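# Usage sketch (hypothetical data; assumes the pretrained checkpoint exists at
# the default `model_path`):
#
#   from jina import Document
#
#   doc = Document(
#       blob=np.random.randn(44100).astype(np.float32),
#       tags={'sample_rate': 16000},
#   )
#   AudioCLIPEncoder().encode(DocumentArray([doc]))
#   print(doc.embedding.shape)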
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path).to(device).eval()
self.traversal_paths = traversal_paths
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with a `blob` of size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that defines the `traversal_paths`.
"""
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
        # traverse through all documents that need to be processed
flat_docs = docs.traverse_flat(traversal_paths)
        # filter out documents without an audio blob
filtered_docs = DocumentArray(
[doc for doc in flat_docs if doc.blob is not None]
)
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
        if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            # already at the target rate; return the blob unchanged so the
            # caller can still unpack (blob, sample_rate)
            return blob, AudioCLIPEncoder.TARGET_SAMPLE_RATE
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_patch.time += 1
return time_patch.time
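# time_patch above is a stateful stand-in for time.time: each call advances the
# fake clock by one second (starting at 0), making the per-iteration timings in
# test_after_iter deterministic.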
class TestIterTimerHook(TestCase):
def setUp(self) -> None:
self.hook = IterTimerHook()
def test_init(self):
assert self.hook.time_sec_tot == 0
assert self.hook.start_iter == 0
def test_before_train(self):
runner = MagicMock()
runner.iter = 1
self.hook.before_train(runner)
assert self.hook.start_iter == 1
def test_before_epoch(self):
runner = Mock()
self.hook._before_epoch(runner)
assert isinstance(self.hook.t, float)
@patch('time.time', MagicMock(return_value=1))
def test_before_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
self.hook._before_epoch(runner)
for mode in ('train', 'val', 'test'):
self.hook._before_iter(runner, batch_idx=1, mode=mode)
runner.message_hub.update_scalar.assert_called_with(
f'{mode}/data_time', 0)
@patch('time.time', time_patch)
def test_after_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
runner.log_processor.window_size = 10
runner.max_iters = 100
runner.iter = 0
runner.test_dataloader = [0] * 20
runner.val_dataloader = [0] * 20
self.hook._before_epoch(runner)
self.hook.before_run(runner)
self.hook._after_iter(runner, batch_idx=1)
runner.message_hub.update_scalar.assert_called()
runner.message_hub.get_log.assert_not_called()
runner.message_hub.update_info.assert_not_called()
runner.message_hub = MessageHub.get_instance('test_iter_timer_hook')
runner.iter = 9
# eta = (100 - 10) / 1
self.hook._after_iter(runner, batch_idx=89)
assert runner.message_hub.get_info('eta') == 90
self.hook._after_iter(runner, batch_idx=9, mode='val')
assert runner.message_hub.get_info('eta') == 10
self.hook._after_iter(runner, batch_idx=19, mode='test')
assert runner.message_hub.get_info('eta') == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_patch.time += 1
return time_patch.time
class TestIterTimerHook(TestCase):
def setUp(self) -> None:
self.hook = IterTimerHook()
def test_init(self):
assert self.hook.time_sec_tot == 0
assert self.hook.start_iter == 0
def test_before_run(self):
runner = MagicMock()
runner.iter = 1
self.hook.before_run(runner)
assert self.hook.start_iter == 1
def test_before_epoch(self):
runner = Mock()
self.hook._before_epoch(runner)
assert isinstance(self.hook.t, float)
@patch('time.time', MagicMock(return_value=1))
def test_before_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
self.hook._before_epoch(runner)
for mode in ('train', 'val', 'test'):
self.hook._before_iter(runner, batch_idx=1, mode=mode)
runner.message_hub.update_scalar.assert_called_with(
f'{mode}/data_time', 0)
@patch('time.time', time_patch)
def test_after_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
runner.log_processor.window_size = 10
runner.max_iters = 100
runner.iter = 0
runner.test_dataloader = [0] * 20
runner.val_dataloader = [0] * 20
self.hook._before_epoch(runner)
self.hook.before_run(runner)
self.hook._after_iter(runner, batch_idx=1)
runner.message_hub.update_scalar.assert_called()
runner.message_hub.get_log.assert_not_called()
runner.message_hub.update_info.assert_not_called()
runner.message_hub = MessageHub.get_instance('test_iter_timer_hook')
runner.iter = 9
# eta = (100 - 10) / 1
self.hook._after_iter(runner, batch_idx=89)
assert runner.message_hub.get_info('eta') == 90
self.hook._after_iter(runner, batch_idx=9, mode='val')
assert runner.message_hub.get_info('eta') == 10
self.hook._after_iter(runner, batch_idx=19, mode='test')
assert runner.message_hub.get_info('eta') == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, demo_track_inputs,
get_detector_cfg, get_roi_head_cfg, replace_to_ceph)
__all__ = [
'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',
'demo_mm_proposals', 'demo_mm_sampling_results', 'replace_to_ceph',
'demo_track_inputs', 'VideoDataSampleFeeder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg, replace_to_ceph)
__all__ = [
'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',
'demo_mm_proposals', 'demo_mm_sampling_results', 'replace_to_ceph'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'register_all_modules',
'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM', 'all_reduce_dict',
'allreduce_grads', 'reduce_mean', 'sync_random_seed', 'ConfigType',
'InstanceList', 'MultiConfig', 'OptConfigType', 'OptInstanceList',
'OptMultiConfig', 'OptPixelList', 'PixelList', 'RangeType'
]
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
raise ValueError("Can only parse messages")
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog): # type: ignore[override]
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
raise ValueError("Can only parse messages")
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
norm_cfg=norm_cfg,
num_outs=5),
roi_head=dict(
bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(max_epochs=50)
evaluation = dict(interval=2)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
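# Illustrative arithmetic (not part of the config): with linear LR auto-scaling,
# a run on 4 GPUs x 8 samples (total batch 32) would use lr = 0.08 * 32 / 64 = 0.04.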
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
norm_cfg=norm_cfg,
num_outs=5),
roi_head=dict(
bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(max_epochs=50)
evaluation = dict(interval=2)
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, _jpeg_image_pil, erase, erase_image, erase_video, jpeg, jpeg_image, jpeg_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
grayscale_to_rgb,
grayscale_to_rgb_image,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
sanitize_bounding_boxes,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
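# A quick sanity-check sketch of the functional API re-exported above (a minimal
# example, assuming a torchvision build that ships `transforms.v2`):
if __name__ == "__main__":
    import torch
    from torchvision.transforms.v2 import functional as F
    img = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
    assert F.get_size(img) == [480, 640]  # (height, width)
    flipped = F.horizontal_flip(img)  # pure tensors are treated as images
    resized = F.resize(img, size=[224, 224])
    assert list(resized.shape) == [3, 224, 224]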
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
grayscale_to_rgb,
grayscale_to_rgb_image,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
sanitize_bounding_boxes,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Base dataset for detection.
Args:
proposal_file (str, optional): Proposals file path. Defaults to None.
        file_client_args (dict, optional): Arguments to instantiate the
            corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
*args,
seg_map_suffix: str = '.png',
proposal_file: Optional[str] = None,
                 file_client_args: Optional[dict] = None,
                 backend_args: Optional[dict] = None,
**kwargs) -> None:
self.seg_map_suffix = seg_map_suffix
self.proposal_file = proposal_file
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
                'The `file_client_args` is deprecated, '
                'please use `backend_args` instead; please refer to '
                'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py'  # noqa: E501
)
super().__init__(*args, **kwargs)
def full_init(self) -> None:
"""Load annotation file and set ``BaseDataset._fully_initialized`` to
True.
If ``lazy_init=False``, ``full_init`` will be called during the
instantiation and ``self._fully_initialized`` will be set to True. If
``obj._fully_initialized=False``, the class method decorated by
``force_full_init`` will call ``full_init`` automatically.
Several steps to initialize annotation:
- load_data_list: Load annotations from annotation file.
- load_proposals: Load proposals from proposal file, if
`self.proposal_file` is not None.
- filter data information: Filter annotations according to
filter_cfg.
- slice_data: Slice dataset according to ``self._indices``
- serialize_data: Serialize ``self.data_list`` if
``self.serialize_data`` is True.
"""
if self._fully_initialized:
return
# load data information
self.data_list = self.load_data_list()
# get proposals from file
if self.proposal_file is not None:
self.load_proposals()
# filter illegal data, such as data that has no annotations.
self.data_list = self.filter_data()
# Get subset data according to indices.
if self._indices is not None:
self.data_list = self._get_unserialized_subset(self._indices)
# serialize data_list
if self.serialize_data:
self.data_bytes, self.data_address = self._serialize_data()
self._fully_initialized = True
def load_proposals(self) -> None:
"""Load proposals from proposals file.
        The `proposals_list` should be a dict[img_path: proposals]
        with the same length as `data_list`, and each `proposals` entry should
        be a `dict` or :obj:`InstanceData` that usually contains the following keys:
        - bboxes (np.ndarray): Has a shape (num_instances, 4), with the last
          dimension arranged as (x1, y1, x2, y2).
        - scores (np.ndarray): Classification scores, has a shape
          (num_instances, ).
"""
# TODO: Add Unit Test after fully support Dump-Proposal Metric
if not is_abs(self.proposal_file):
self.proposal_file = osp.join(self.data_root, self.proposal_file)
proposals_list = load(
self.proposal_file, backend_args=self.backend_args)
assert len(self.data_list) == len(proposals_list)
for data_info in self.data_list:
img_path = data_info['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
proposals = proposals_list[file_name]
data_info['proposals'] = proposals
def get_cat_ids(self, idx: int) -> List[int]:
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
List[int]: All categories in the image of specified index.
"""
instances = self.get_data_info(idx)['instances']
return [instance['bbox_label'] for instance in instances]
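# Illustration (hypothetical path, not part of the class): the lookup key built in
# `load_proposals` keeps only the last directory plus the file name, e.g.
#     data/coco/train2017/000001.jpg  ->  train2017/000001.jpg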
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Base dataset for detection.
Args:
proposal_file (str, optional): Proposals file path. Defaults to None.
        file_client_args (dict, optional): Arguments to instantiate the
            corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
*args,
seg_map_suffix: str = '.png',
proposal_file: Optional[str] = None,
                 file_client_args: Optional[dict] = None,
                 backend_args: Optional[dict] = None,
**kwargs) -> None:
self.seg_map_suffix = seg_map_suffix
self.proposal_file = proposal_file
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
                'The `file_client_args` is deprecated, '
                'please use `backend_args` instead; please refer to '
                'https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/_base_/datasets/coco_detection.py'  # noqa: E501
)
super().__init__(*args, **kwargs)
def full_init(self) -> None:
"""Load annotation file and set ``BaseDataset._fully_initialized`` to
True.
If ``lazy_init=False``, ``full_init`` will be called during the
instantiation and ``self._fully_initialized`` will be set to True. If
``obj._fully_initialized=False``, the class method decorated by
``force_full_init`` will call ``full_init`` automatically.
Several steps to initialize annotation:
- load_data_list: Load annotations from annotation file.
- load_proposals: Load proposals from proposal file, if
`self.proposal_file` is not None.
- filter data information: Filter annotations according to
filter_cfg.
- slice_data: Slice dataset according to ``self._indices``
- serialize_data: Serialize ``self.data_list`` if
``self.serialize_data`` is True.
"""
if self._fully_initialized:
return
# load data information
self.data_list = self.load_data_list()
# get proposals from file
if self.proposal_file is not None:
self.load_proposals()
# filter illegal data, such as data that has no annotations.
self.data_list = self.filter_data()
# Get subset data according to indices.
if self._indices is not None:
self.data_list = self._get_unserialized_subset(self._indices)
# serialize data_list
if self.serialize_data:
self.data_bytes, self.data_address = self._serialize_data()
self._fully_initialized = True
def load_proposals(self) -> None:
"""Load proposals from proposals file.
        The `proposals_list` should be a dict[img_path: proposals]
        with the same length as `data_list`, and each `proposals` entry should
        be a `dict` or :obj:`InstanceData` that usually contains the following keys:
        - bboxes (np.ndarray): Has a shape (num_instances, 4), with the last
          dimension arranged as (x1, y1, x2, y2).
        - scores (np.ndarray): Classification scores, has a shape
          (num_instances, ).
"""
# TODO: Add Unit Test after fully support Dump-Proposal Metric
if not is_abs(self.proposal_file):
self.proposal_file = osp.join(self.data_root, self.proposal_file)
proposals_list = load(
self.proposal_file, backend_args=self.backend_args)
assert len(self.data_list) == len(proposals_list)
for data_info in self.data_list:
img_path = data_info['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
proposals = proposals_list[file_name]
data_info['proposals'] = proposals
def get_cat_ids(self, idx: int) -> List[int]:
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
List[int]: All categories in the image of specified index.
"""
instances = self.get_data_info(idx)['instances']
return [instance['bbox_label'] for instance in instances]
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
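# Minimal usage sketch (hypothetical file name; assumes `langchain_core` is
# installed). The handler is normally passed to a chain or agent run via the
# `callbacks` argument:
if __name__ == "__main__":
    handler = FileCallbackHandler("agent_run.log")
    handler.on_chain_start({"name": "demo"}, {"input": "hi"})
    handler.on_chain_end({"output": "done"})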
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast(TextIO, Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
|
import asyncio
import time
import pytest
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator, RequestsCounter
def slow_blocking_generator():
for i in range(2):
yield Document(id=str(i))
time.sleep(2)
@pytest.mark.asyncio
async def test_iter_requests():
iter = request_generator(exec_endpoint='/', data=slow_blocking_generator())
count = 0
num_reqs = 0
async def another_task():
nonlocal count
for _ in range(20):
await asyncio.sleep(0.2)
count += 1
task = asyncio.create_task(another_task())
async for _ in AsyncRequestsIterator(iter):
"""Using following code will block the event loop and count will be <5
for _ in iter:
...
"""
num_reqs += 1
task.cancel()
# ideally count will be 20, but to avoid flaky CI
assert count > 15
@pytest.mark.asyncio
async def test_iter_requests_with_prefetch():
max_amount_requests = RequestsCounter()
counter = RequestsCounter()
async def consume_requests():
while True:
await asyncio.sleep(0.01)
if counter.count > 0:
counter.count -= 1
async def req_iterator(max_amount_requests):
for i in range(1000):
await asyncio.sleep(0.001)
counter.count += 1
if counter.count > max_amount_requests.count:
max_amount_requests.count = counter.count
yield i
consume_task = asyncio.create_task(consume_requests())
async for _ in AsyncRequestsIterator(
req_iterator(max_amount_requests), counter, 10
):
pass
consume_task.cancel()
assert max_amount_requests.count == 10
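# Note on the assertion above: `AsyncRequestsIterator` is created with a prefetch
# limit of 10, so at most 10 requests can be in flight before the consumer drains
# the counter, which is why `max_amount_requests.count` settles at exactly 10.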
|
import time
import asyncio
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator
import pytest
def slow_blocking_generator():
for i in range(2):
yield Document(id=i)
time.sleep(2)
@pytest.mark.asyncio
async def test_iter_requests():
iter = request_generator(exec_endpoint='/', data=slow_blocking_generator())
count = 0
num_reqs = 0
async def another_task():
nonlocal count
for _ in range(20):
await asyncio.sleep(0.2)
count += 1
task = asyncio.create_task(another_task())
async for _ in AsyncRequestsIterator(iter):
"""Using following code will block the event loop and count will be <5
for _ in iter:
...
"""
num_reqs += 1
task.cancel()
# ideally count will be 20, but to avoid flaky CI
assert count > 15
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question,
k=self.k,
**self.search_kwargs,
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
msg = "VectorDBQAWithSourcesChain does not support async"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`",
stacklevel=5,
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question,
k=self.k,
**self.search_kwargs,
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
msg = "VectorDBQAWithSourcesChain does not support async"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`",
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIM_WRAPPER_CONSTRUCTORS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS,
RUNNERS, TASK_UTILS, TRANSFORMS, VISBACKENDS,
VISUALIZERS, WEIGHT_INITIALIZERS)
__all__ = [
'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS', 'DATA_SAMPLERS',
'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS', 'OPTIMIZERS',
'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS', 'METRICS',
'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS,
RUNNERS, TASK_UTILS, TRANSFORMS, VISBACKENDS,
VISUALIZERS, WEIGHT_INITIALIZERS)
__all__ = [
'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS', 'DATA_SAMPLERS',
'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS', 'OPTIMIZERS',
'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS', 'METRICS',
'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS'
]
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
Load the data from the url into an AudioNdArray.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
import numpy as np
            from docarray.typing import AudioNdArray, AudioUrl
            class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor, doc.frame_rate = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
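# Validation examples (illustrative URLs): 'https://example.com/song.wav' passes
# ('wav' is listed in AUDIO_FILE_FORMATS), an extension-less URL also passes, and
# 'https://example.com/notes.txt' raises ValueError.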
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
Load the data from the url into an AudioNdArray.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
            from docarray.typing import AudioNdArray, AudioUrl
            class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor, doc.frame_rate = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Optional, Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils.typing import ConfigDict, MultiConfig, OptConfigType
from mmdet.registry import MODELS
@MODELS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(
self,
num_ins: int,
fusion_level: int,
        seg_scale_factor: float = 1 / 8,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 183,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
        ignore_label: Optional[int] = None,
        loss_weight: Optional[float] = None,
loss_seg: ConfigDict = dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2),
init_cfg: MultiConfig = dict(
type='Kaiming', override=dict(name='conv_logits'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.seg_scale_factor = seg_scale_factor
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` will be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
self.criterion = MODELS.build(loss_seg)
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (tuple[Tensor]): Multi scale feature maps.
Returns:
tuple[Tensor]:
- mask_pred (Tensor): Predicted mask logits.
- x (Tensor): Fused feature.
"""
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
def loss(self, mask_pred: Tensor, labels: Tensor) -> Tensor:
"""Loss function.
Args:
mask_pred (Tensor): Predicted mask logits.
labels (Tensor): Ground truth.
Returns:
Tensor: Semantic segmentation loss.
"""
labels = F.interpolate(
labels.float(), scale_factor=self.seg_scale_factor, mode='nearest')
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
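# Smoke-test sketch (a minimal example, assuming mmdet and its registered losses
# are importable; shapes are illustrative):
if __name__ == "__main__":
    import torch
    head = FusedSemanticHead(num_ins=5, fusion_level=1, num_classes=183)
    feats = tuple(torch.randn(2, 256, 64 // 2**i, 64 // 2**i) for i in range(5))
    mask_pred, fused = head(feats)  # (2, 183, 32, 32) and (2, 256, 32, 32)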
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import build_loss
from mmdet.registry import MODELS
@MODELS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
conv_cfg=None,
norm_cfg=None,
ignore_label=None,
loss_weight=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=0.2),
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` will be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
self.criterion = build_loss(loss_seg)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
|
import torch
from parameterized import parameterized
from torchaudio.prototype.models import conformer_wav2vec2_base, emformer_hubert_base
from torchaudio_unittest.common_utils import nested_params, skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSSLModel(TorchaudioTestCase):
def _smoke_test(self, model, feature_dim, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim, device=device, dtype=dtype)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
device=device,
)
model(features, lengths)
@nested_params(
[(conformer_wav2vec2_base, 64), (emformer_hubert_base, 80)],
[torch.float32, torch.float64],
)
def test_cpu_smoke_test(self, model_feature_dim, dtype):
model, feature_dim = model_feature_dim
model = model()
self._smoke_test(model, feature_dim, torch.device("cpu"), dtype)
@nested_params(
[(conformer_wav2vec2_base, 64), (emformer_hubert_base, 80)],
[torch.float32, torch.float64],
)
@skipIfNoCuda
def test_cuda_smoke_test(self, model_feature_dim, dtype):
model, feature_dim = model_feature_dim
model = model()
self._smoke_test(model, feature_dim, torch.device("cuda"), dtype)
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_extract_feature(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
if feature_dim == 64:
num_layers = len(model.encoder.conformer)
else:
num_layers = len(model.encoder.emformer.emformer_layers)
features = torch.randn(batch_size, num_frames, feature_dim)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
all_features, lengths_ = model.extract_features(features, lengths, num_layers=None)
assert len(all_features) == num_layers
for feats in all_features:
assert feats.ndim == 3
assert feats.shape[0] == batch_size
assert lengths_.shape == torch.Size([batch_size])
        for num in range(1, num_layers + 1):
            feats, lengths_ = model.extract_features(features, lengths, num_layers=num)
            assert len(feats) == num
            for i in range(num):
                self.assertEqual(all_features[i], feats[i])
            assert lengths_.shape == torch.Size([batch_size])
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_zero_length(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim)
input_lengths = torch.zeros(batch_size)
_, output_lengths = model(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
_, output_lengths = model.extract_features(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_torchscript_consistency(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
ref_out, ref_len = model(features, lengths)
scripted = torch_script(model)
hyp_out, hyp_len = scripted(features, lengths)
self.assertEqual(hyp_out, ref_out)
self.assertEqual(hyp_len, ref_len)
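# Note: `nested_params` above expands the cartesian product of (model, feature_dim)
# pairs and dtypes, replacing the explicit per-combination `parameterized.expand`
# lists used in the older version of this test below.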
|
import torch
from parameterized import parameterized
from torchaudio.prototype.models import conformer_wav2vec2_base, emformer_hubert_base
from torchaudio_unittest.common_utils import skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSSLModel(TorchaudioTestCase):
def _smoke_test(self, model, feature_dim, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim, device=device, dtype=dtype)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
device=device,
)
model(features, lengths)
@parameterized.expand(
[
(conformer_wav2vec2_base, torch.float32, 64),
(conformer_wav2vec2_base, torch.float64, 64),
(emformer_hubert_base, torch.float32, 80),
(emformer_hubert_base, torch.float64, 80),
]
)
def test_cpu_smoke_test(self, model, dtype, feature_dim):
model = model()
self._smoke_test(model, feature_dim, torch.device("cpu"), dtype)
@parameterized.expand(
[
(conformer_wav2vec2_base, torch.float32, 64),
(conformer_wav2vec2_base, torch.float64, 64),
(emformer_hubert_base, torch.float32, 80),
(emformer_hubert_base, torch.float64, 80),
]
)
@skipIfNoCuda
def test_cuda_smoke_test(self, model, dtype, feature_dim):
model = model()
self._smoke_test(model, feature_dim, torch.device("cuda"), dtype)
@parameterized.expand(
[
(conformer_wav2vec2_base, 64),
(emformer_hubert_base, 80),
]
)
def test_extract_feature(self, model, feature_dim):
model = model()
model.eval()
batch_size, num_frames = 3, 1024
if feature_dim == 64:
num_layers = len(model.encoder.conformer)
else:
num_layers = len(model.encoder.emformer.emformer_layers)
features = torch.randn(batch_size, num_frames, feature_dim)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
all_features, lengths_ = model.extract_features(features, lengths, num_layers=None)
assert len(all_features) == num_layers
for feats in all_features:
assert feats.ndim == 3
assert feats.shape[0] == batch_size
assert lengths_.shape == torch.Size([batch_size])
        for num in range(1, num_layers + 1):
            feats, lengths_ = model.extract_features(features, lengths, num_layers=num)
            assert len(feats) == num
            for i in range(num):
                self.assertEqual(all_features[i], feats[i])
            assert lengths_.shape == torch.Size([batch_size])
@parameterized.expand(
[
(conformer_wav2vec2_base, 64),
(emformer_hubert_base, 80),
]
)
def test_zero_length(self, model, feature_dim):
model = model()
model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim)
input_lengths = torch.zeros(batch_size)
_, output_lengths = model(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
_, output_lengths = model.extract_features(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
@parameterized.expand(
[
(conformer_wav2vec2_base, 64),
(emformer_hubert_base, 80),
]
)
def test_torchscript_consistency(self, model, feature_dim):
model = model()
model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
ref_out, ref_len = model(features, lengths)
scripted = torch_script(model)
hyp_out, hyp_len = scripted(features, lengths)
self.assertEqual(hyp_out, ref_out)
self.assertEqual(hyp_len, ref_len)
|
from dataclasses import dataclass, field
from typing import Union
from transformers import TrainingArguments as TransformersTrainingArguments
from transformers.utils import ExplicitEnum
class BatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for batch samplers.
The batch sampler is responsible for determining how samples are grouped into batches during training.
Valid options are:
- ``BatchSamplers.BATCH_SAMPLER``: The default PyTorch batch sampler.
- ``BatchSamplers.NO_DUPLICATES``: Ensures no duplicate samples in a batch.
- ``BatchSamplers.GROUP_BY_LABEL``: Ensures each batch has 2+ samples from the same label.
"""
BATCH_SAMPLER = "batch_sampler"
NO_DUPLICATES = "no_duplicates"
GROUP_BY_LABEL = "group_by_label"
class MultiDatasetBatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for multi-dataset batch samplers.
The multi-dataset batch sampler is responsible for determining in what order batches are sampled from multiple
datasets during training. Valid options are:
- ``MultiDatasetBatchSamplers.ROUND_ROBIN``: Round-robin sampling from each dataset until one is exhausted.
With this strategy, it's likely that not all samples from each dataset are used, but each dataset is sampled
from equally.
- ``MultiDatasetBatchSamplers.PROPORTIONAL``: Sample from each dataset in proportion to its size [default].
With this strategy, all samples from each dataset are used and larger datasets are sampled from more frequently.
"""
ROUND_ROBIN = "round_robin" # Round-robin sampling from each dataset
PROPORTIONAL = "proportional" # Sample from each dataset in proportion to its size [default]
@dataclass
class SentenceTransformerTrainingArguments(TransformersTrainingArguments):
"""
SentenceTransformerTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
batch_sampler: Union[BatchSamplers, str] = field(
default=BatchSamplers.BATCH_SAMPLER, metadata={"help": "The batch sampler to use."}
)
multi_dataset_batch_sampler: Union[MultiDatasetBatchSamplers, str] = field(
default=MultiDatasetBatchSamplers.PROPORTIONAL, metadata={"help": "The multi-dataset batch sampler to use."}
)
def __post_init__(self):
super().__post_init__()
self.batch_sampler = BatchSamplers(self.batch_sampler)
self.multi_dataset_batch_sampler = MultiDatasetBatchSamplers(self.multi_dataset_batch_sampler)
        # The `compute_loss` method in `SentenceTransformerTrainer` is overridden to only compute the prediction loss,
        # so we set `prediction_loss_only` to `True` here to avoid unnecessary work
        # computing and gathering full model outputs during evaluation.
self.prediction_loss_only = True
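# Usage sketch (hypothetical output directory): plain strings are coerced to the
# enums in `__post_init__`, so the spelling below is equivalent to passing the
# enum member directly.
if __name__ == "__main__":
    args = SentenceTransformerTrainingArguments(
        output_dir="out",
        batch_sampler="no_duplicates",  # same as BatchSamplers.NO_DUPLICATES
    )
    assert args.batch_sampler is BatchSamplers.NO_DUPLICATES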
|
from dataclasses import dataclass, field
from typing import Union
from transformers import TrainingArguments as TransformersTrainingArguments
from transformers.utils import ExplicitEnum
class BatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for batch samplers.
"""
BATCH_SAMPLER = "batch_sampler" # Just the default PyTorch batch sampler [default]
NO_DUPLICATES = "no_duplicates" # Ensures no duplicate samples in a batch
GROUP_BY_LABEL = "group_by_label" # Ensure each batch has 2+ samples from the same label
class MultiDatasetBatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for multi-dataset batch samplers.
"""
ROUND_ROBIN = "round_robin" # Round-robin sampling from each dataset
PROPORTIONAL = "proportional" # Sample from each dataset in proportion to its size [default]
@dataclass
class SentenceTransformerTrainingArguments(TransformersTrainingArguments):
batch_sampler: Union[BatchSamplers, str] = field(
default=BatchSamplers.BATCH_SAMPLER, metadata={"help": "The batch sampler to use."}
)
multi_dataset_batch_sampler: Union[MultiDatasetBatchSamplers, str] = field(
default=MultiDatasetBatchSamplers.PROPORTIONAL, metadata={"help": "The multi-dataset batch sampler to use."}
)
def __post_init__(self):
super().__post_init__()
self.batch_sampler = BatchSamplers(self.batch_sampler)
self.multi_dataset_batch_sampler = MultiDatasetBatchSamplers(self.multi_dataset_batch_sampler)
# The `compute_loss` method in `SentenceTransformerTrainer` is overridden to only compute the prediction loss,
        # so we set `prediction_loss_only` to `True` here to avoid unnecessary computation during evaluation
self.prediction_loss_only = True
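
if __name__ == "__main__":
    # Usage sketch (illustrative values; requires `transformers` and its
    # dependencies to be installed): plain strings are accepted and normalized
    # to the enum members by `__post_init__` above.
    args = SentenceTransformerTrainingArguments(
        output_dir="checkpoints",
        batch_sampler="no_duplicates",  # -> BatchSamplers.NO_DUPLICATES
        multi_dataset_batch_sampler="round_robin",  # -> MultiDatasetBatchSamplers.ROUND_ROBIN
    )
    assert args.batch_sampler is BatchSamplers.NO_DUPLICATES
    assert args.prediction_loss_only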
|
"""
RAKE keyword-table based index.
Similar to KeywordTableIndex, but uses RAKE instead of GPT.
"""
from typing import Any, Set, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.indices.keyword_table.base import (
BaseKeywordTableIndex,
KeywordTableRetrieverMode,
)
from llama_index.core.indices.keyword_table.utils import rake_extract_keywords
class RAKEKeywordTableIndex(BaseKeywordTableIndex):
"""
RAKE Keyword Table Index.
This index uses a RAKE keyword extractor to extract keywords from the text.
"""
def _extract_keywords(self, text: str) -> Set[str]:
"""Extract keywords from text."""
return rake_extract_keywords(text, max_keywords=self.max_keywords_per_chunk)
def as_retriever(
self,
retriever_mode: Union[
str, KeywordTableRetrieverMode
] = KeywordTableRetrieverMode.RAKE,
**kwargs: Any,
) -> BaseRetriever:
return super().as_retriever(retriever_mode=retriever_mode, **kwargs)
# legacy
GPTRAKEKeywordTableIndex = RAKEKeywordTableIndex
|
"""RAKE keyword-table based index.
Similar to KeywordTableIndex, but uses RAKE instead of GPT.
"""
from typing import Any, Set, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.indices.keyword_table.base import (
BaseKeywordTableIndex,
KeywordTableRetrieverMode,
)
from llama_index.core.indices.keyword_table.utils import rake_extract_keywords
class RAKEKeywordTableIndex(BaseKeywordTableIndex):
"""RAKE Keyword Table Index.
This index uses a RAKE keyword extractor to extract keywords from the text.
"""
def _extract_keywords(self, text: str) -> Set[str]:
"""Extract keywords from text."""
return rake_extract_keywords(text, max_keywords=self.max_keywords_per_chunk)
def as_retriever(
self,
retriever_mode: Union[
str, KeywordTableRetrieverMode
] = KeywordTableRetrieverMode.RAKE,
**kwargs: Any,
) -> BaseRetriever:
return super().as_retriever(retriever_mode=retriever_mode, **kwargs)
# legacy
GPTRAKEKeywordTableIndex = RAKEKeywordTableIndex
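
if __name__ == "__main__":
    # Usage sketch (hypothetical document text; assumes the llama_index core
    # package is installed -- RAKE extraction itself does not call an LLM).
    from llama_index.core import Document

    index = RAKEKeywordTableIndex.from_documents(
        [Document(text="Rapid Automatic Keyword Extraction scores candidate phrases.")]
    )
    retriever = index.as_retriever()  # defaults to KeywordTableRetrieverMode.RAKE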
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_canny(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
prompts = "bird"
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
canny_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, jax.device_count())
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
processed_image = shard(processed_image)
images = pipe(
prompt_ids=prompt_ids,
image=processed_image,
params=p_params,
prng_seed=rng,
num_inference_steps=50,
jit=True,
).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
)
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def test_pose(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
prompts = "Chef in the kitchen"
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
pose_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
)
processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, jax.device_count())
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
processed_image = shard(processed_image)
images = pipe(
prompt_ids=prompt_ids,
image=processed_image,
params=p_params,
prng_seed=rng,
num_inference_steps=50,
jit=True,
).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
)
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_canny(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
prompts = "bird"
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
canny_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, jax.device_count())
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
processed_image = shard(processed_image)
images = pipe(
prompt_ids=prompt_ids,
image=processed_image,
params=p_params,
prng_seed=rng,
num_inference_steps=50,
jit=True,
).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
)
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def test_pose(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
prompts = "Chef in the kitchen"
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
pose_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
)
processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, jax.device_count())
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
processed_image = shard(processed_image)
images = pipe(
prompt_ids=prompt_ids,
image=processed_image,
params=p_params,
prng_seed=rng,
num_inference_steps=50,
jit=True,
).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
)
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
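
# Note on the data-parallel pattern above (an explanatory sketch, not part of
# the tests): `replicate(params)` copies the parameter pytree onto every local
# device, while `shard(x)` splits the leading batch axis across devices. With
# jit=True the pipeline therefore runs once per device and returns images with
# a leading device axis of size jax.device_count(), which the assertions merge
# back into a flat batch via the reshape shown above.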
|
"""Tool for Steam Web API"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.steam import SteamWebAPIWrapper
class SteamWebAPIQueryRun(BaseTool):
"""Tool that searches the Steam Web API."""
mode: str
name: str = "steam"
description: str = (
"A wrapper around Steam Web API."
"Steam Tool is useful for fetching User profiles and stats, Game data and more!"
"Input should be the User or Game you want to query."
)
api_wrapper: SteamWebAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Steam-WebAPI tool."""
return self.api_wrapper.run(self.mode, query)
|
"""Tool for Steam Web API"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.steam import SteamWebAPIWrapper
class SteamWebAPIQueryRun(BaseTool): # type: ignore[override]
"""Tool that searches the Steam Web API."""
mode: str
name: str = "steam"
description: str = (
"A wrapper around Steam Web API."
"Steam Tool is useful for fetching User profiles and stats, Game data and more!"
"Input should be the User or Game you want to query."
)
api_wrapper: SteamWebAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Steam-WebAPI tool."""
return self.api_wrapper.run(self.mode, query)
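
if __name__ == "__main__":
    # Usage sketch (hypothetical: assumes a valid Steam API key is configured
    # for SteamWebAPIWrapper, and that "get_game_details" is a supported mode
    # of the wrapper; both are illustrative here).
    tool = SteamWebAPIQueryRun(mode="get_game_details", api_wrapper=SteamWebAPIWrapper())
    print(tool.run("Terraria"))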
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import (
GoogleCloudEnterpriseSearchRetriever,
GoogleVertexAIMultiTurnSearchRetriever,
GoogleVertexAISearchRetriever,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleVertexAISearchRetriever": "langchain_community.retrievers",
"GoogleVertexAIMultiTurnSearchRetriever": "langchain_community.retrievers",
"GoogleCloudEnterpriseSearchRetriever": "langchain_community.retrievers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleCloudEnterpriseSearchRetriever",
"GoogleVertexAIMultiTurnSearchRetriever",
"GoogleVertexAISearchRetriever",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import (
GoogleCloudEnterpriseSearchRetriever,
GoogleVertexAIMultiTurnSearchRetriever,
GoogleVertexAISearchRetriever,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleVertexAISearchRetriever": "langchain_community.retrievers",
"GoogleVertexAIMultiTurnSearchRetriever": "langchain_community.retrievers",
"GoogleCloudEnterpriseSearchRetriever": "langchain_community.retrievers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleVertexAISearchRetriever",
"GoogleVertexAIMultiTurnSearchRetriever",
"GoogleCloudEnterpriseSearchRetriever",
]
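
if __name__ == "__main__":
    # Sketch: attribute access on this module routes through __getattr__ above,
    # which emits a deprecation warning and re-imports the class from
    # langchain_community (which must be installed).
    cls = __getattr__("GoogleVertexAISearchRetriever")
    print(f"{cls.__module__}.{cls.__name__}")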
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an accuracy over all possible source sentences src_i, and likewise the accuracy for the opposite direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool of sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
These datasets include development sets.
Usage:
python examples/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
import logging
import sys
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, evaluation
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
if dataset.column_names == ["train"]:
num_samples = min(5000, len(dataset["train"]))
datasets[f"train[:{num_samples}]"].append(dataset["train"].select(range(num_samples)))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
|
"""
Given a tab-separated file (.tsv) with parallel sentences, where the second column is the translation of the sentence in the first column, for example, in the format:
src1 trg1
src2 trg2
...
where trg_i is the translation of src_i.
Given src_i, the TranslationEvaluator checks which trg_j has the highest similarity using cosine similarity. If i == j, we assume
a match, i.e., the correct translation has been found for src_i out of all possible target sentences.
It then computes an accuracy over all possible source sentences src_i, and likewise the accuracy for the opposite direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool of sentences.
Usage:
python [model_name_or_path] [parallel-file1] [parallel-file2] ...
For example:
python distiluse-base-multilingual-cased TED2020-en-de.tsv.gz
See the training_multilingual/get_parallel_data_...py scripts for getting parallel sentence data from different sources
"""
from sentence_transformers import SentenceTransformer, evaluation, LoggingHandler
import sys
import gzip
import os
import logging
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)
model_name = sys.argv[1]
filepaths = sys.argv[2:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for filepath in filepaths:
src_sentences = []
trg_sentences = []
with gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, 'r', encoding='utf8') as fIn:
for line in fIn:
splits = line.strip().split('\t')
if len(splits) >= 2:
src_sentences.append(splits[0])
trg_sentences.append(splits[1])
logger.info(os.path.basename(filepath)+": "+str(len(src_sentences))+" sentence pairs")
dev_trans_acc = evaluation.TranslationEvaluator(src_sentences, trg_sentences, name=os.path.basename(filepath), batch_size=inference_batch_size)
dev_trans_acc(model)
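
def _translation_accuracy(src_emb, trg_emb):
    # Illustrative re-implementation of the metric described in the module
    # docstring (the evaluator's own code may differ): src_i counts as correct
    # when its cosine-nearest neighbour among all target sentences is trg_i.
    import numpy as np

    src = src_emb / np.linalg.norm(src_emb, axis=1, keepdims=True)
    trg = trg_emb / np.linalg.norm(trg_emb, axis=1, keepdims=True)
    return float((np.argmax(src @ trg.T, axis=1) == np.arange(len(src))).mean())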
|
"""Helper functions for clients in Jina."""
from functools import wraps
from typing import Callable
from jina.excepts import BadClientCallback, BadServer
from jina.helper import get_rich_console
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2
from jina.types.request.data import Response
def pprint_routes(resp: 'Response', stack_limit: int = 3):
"""Pretty print routes with :mod:`prettytable`, fallback to :func:`print`.
:param resp: the :class:`Response` object
:param stack_limit: traceback limit
"""
routes = resp.routes
from rich import box
from rich.table import Table
table = Table(box=box.SIMPLE)
for v in ('Executor', 'Time', 'Exception'):
table.add_column(v)
for route in routes:
status_icon = '🟢'
if route.status.code == jina_pb2.StatusProto.ERROR:
status_icon = '🔴'
table.add_row(
f'{status_icon} {route.executor}',
f'{route.start_time.ToMilliseconds() - routes[0].start_time.ToMilliseconds()}ms',
''.join(route.status.exception.stacks[-stack_limit:]),
)
console = get_rich_console()
console.print(table)
def _safe_callback(func: Callable, continue_on_error: bool, logger) -> Callable:
@wraps(func)
def _arg_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as ex:
err_msg = f'uncaught exception in callback {func.__name__}(): {ex!r}'
if continue_on_error:
logger.error(err_msg)
else:
raise BadClientCallback(err_msg) from ex
return _arg_wrapper
def callback_exec(
response,
on_done: Callable,
on_error: Callable,
on_always: Callable,
continue_on_error: bool,
logger: JinaLogger,
) -> None:
"""Execute the callback with the response.
:param response: the response
:param on_done: the on_done callback
:param on_error: the on_error callback
:param on_always: the on_always callback
:param continue_on_error: whether to continue on error
:param logger: a logger instance
"""
if response.header.status.code >= jina_pb2.StatusProto.ERROR:
if on_error:
_safe_callback(on_error, continue_on_error, logger)(response)
elif continue_on_error:
logger.error(f'Server error: {response.header}')
else:
raise BadServer(response.header)
elif on_done and response.header.status.code == jina_pb2.StatusProto.SUCCESS:
_safe_callback(on_done, continue_on_error, logger)(response)
if on_always:
_safe_callback(on_always, continue_on_error, logger)(response)
|
"""Helper functions for clients in Jina."""
from functools import wraps
from typing import Callable, Optional
from jina.excepts import BadClientCallback
from jina.helper import get_rich_console
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2
from jina.types.request.data import Response
def pprint_routes(resp: 'Response', stack_limit: int = 3):
"""Pretty print routes with :mod:`prettytable`, fallback to :func:`print`.
:param resp: the :class:`Response` object
:param stack_limit: traceback limit
"""
routes = resp.routes
from rich import box
from rich.table import Table
table = Table(box=box.SIMPLE)
for v in ('Executor', 'Time', 'Exception'):
table.add_column(v)
for route in routes:
status_icon = '🟢'
if route.status.code == jina_pb2.StatusProto.ERROR:
status_icon = '🔴'
table.add_row(
f'{status_icon} {route.executor}',
f'{route.start_time.ToMilliseconds() - routes[0].start_time.ToMilliseconds()}ms',
''.join(route.status.exception.stacks[-stack_limit:]),
)
console = get_rich_console()
console.print(table)
def _safe_callback(func: Callable, continue_on_error: bool, logger) -> Callable:
@wraps(func)
def _arg_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as ex:
err_msg = f'uncaught exception in callback {func.__name__}(): {ex!r}'
if continue_on_error:
logger.error(err_msg)
else:
raise BadClientCallback(err_msg) from ex
return _arg_wrapper
def callback_exec(
response,
on_done: Callable,
on_error: Callable,
on_always: Callable,
continue_on_error: bool,
logger: JinaLogger,
) -> None:
"""Execute the callback with the response.
:param response: the response
:param on_done: the on_done callback
:param on_error: the on_error callback
:param on_always: the on_always callback
:param continue_on_error: whether to continue on error
:param logger: a logger instance
"""
if on_error and response.header.status.code >= jina_pb2.StatusProto.ERROR:
@wraps(on_error)
def on_error_wrap(resp):
on_error(resp, None)
_safe_callback(on_error_wrap, continue_on_error, logger)(response)
elif on_done and response.header.status.code == jina_pb2.StatusProto.SUCCESS:
_safe_callback(on_done, continue_on_error, logger)(response)
if on_always:
_safe_callback(on_always, continue_on_error, logger)(response)
def callback_exec_on_error(
on_error: Callable,
exception: Exception,
logger: JinaLogger,
    response: Optional['Response'] = None,
) -> None:
"""Execute the on_error callback with the response, Use when an error outside the response status was thrown.
:param on_error: the on_error callback
:param exception: the exception with was thrown and led to the call of on_error
:param logger: a logger instance
:param response: the response
"""
@wraps(on_error)
def on_error_wrap(resp):
on_error(resp, exception)
_safe_callback(on_error_wrap, False, logger)(response)
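
if __name__ == "__main__":
    # Sketch of the _safe_callback semantics above: with continue_on_error=True
    # an exception raised inside the user callback is logged instead of
    # propagated; with False it is re-raised as BadClientCallback.
    def _boom(_resp):
        raise RuntimeError('boom')

    _safe_callback(_boom, continue_on_error=True, logger=JinaLogger('demo'))(None)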
|
"""Mongo client."""
from collections.abc import Callable
from typing import Dict, Iterable, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
    Concatenates each Mongo doc into a Document used by LlamaIndex.
Args:
host (str): Mongo host.
port (int): Mongo port.
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
uri: Optional[str] = None,
) -> None:
"""Initialize with parameters."""
try:
from pymongo import MongoClient
except ImportError as err:
raise ImportError(
"`pymongo` package not found, please run `pip install pymongo`"
) from err
client: MongoClient
if uri:
client = MongoClient(uri)
elif host and port:
client = MongoClient(host, port)
else:
raise ValueError("Either `host` and `port` or `uri` must be provided.")
self.client = client
def lazy_load_data(
self,
db_name: str,
collection_name: str,
field_names: List[str] = ["text"],
separator: str = "",
query_dict: Optional[Dict] = None,
max_docs: int = 0,
metadata_names: Optional[List[str]] = None,
field_extractors: Optional[Dict[str, Callable[..., str]]] = None,
) -> Iterable[Document]:
"""Load data from the input directory.
Args:
db_name (str): name of the database.
collection_name (str): name of the collection.
field_names(List[str]): names of the fields to be concatenated.
Defaults to ["text"]
separator (str): separator to be used between fields.
Defaults to ""
query_dict (Optional[Dict]): query to filter documents. Read more
at [official docs](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/#std-label-method-find-query)
Defaults to None
max_docs (int): maximum number of documents to load.
Defaults to 0 (no limit)
metadata_names (Optional[List[str]]): names of the fields to be added
to the metadata attribute of the Document. Defaults to None
field_extractors (Optional[Dict[str, Callable[..., str]]]): dictionary
containing field name and a function to extract text from the field.
The default extractor function is `str`. Defaults to None.
        Yields:
            Document: Documents loaded from the collection.
"""
db = self.client[db_name]
cursor = db[collection_name].find(
filter=query_dict or {},
limit=max_docs,
projection={name: 1 for name in field_names + (metadata_names or [])},
)
field_extractors = field_extractors or {}
for item in cursor:
try:
texts = [
field_extractors.get(name, str)(item[name]) for name in field_names
]
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
text = separator.join(texts)
if metadata_names is None:
yield Document(text=text, id_=str(item["_id"]))
else:
try:
metadata = {name: item.get(name) for name in metadata_names}
metadata["collection"] = collection_name
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
yield Document(text=text, id_=str(item["_id"]), metadata=metadata)
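
if __name__ == "__main__":
    # Usage sketch (hypothetical URI, database, collection, and field names;
    # requires a reachable MongoDB instance and the `pymongo` package).
    reader = SimpleMongoReader(uri="mongodb://localhost:27017")
    for doc in reader.lazy_load_data(
        "my_db",
        "my_collection",
        field_names=["title", "body"],
        separator="\n",
        metadata_names=["author"],
        max_docs=10,
    ):
        print(doc.id_, doc.metadata)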
|
"""Mongo client."""
from typing import Dict, Iterable, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
    Concatenates each Mongo doc into a Document used by LlamaIndex.
Args:
host (str): Mongo host.
port (int): Mongo port.
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
uri: Optional[str] = None,
) -> None:
"""Initialize with parameters."""
try:
from pymongo import MongoClient
except ImportError as err:
raise ImportError(
"`pymongo` package not found, please run `pip install pymongo`"
) from err
client: MongoClient
if uri:
client = MongoClient(uri)
elif host and port:
client = MongoClient(host, port)
else:
raise ValueError("Either `host` and `port` or `uri` must be provided.")
self.client = client
def _flatten(self, texts: List[Union[str, List[str]]]) -> List[str]:
result = []
for text in texts:
result += text if isinstance(text, list) else [text]
return result
def lazy_load_data(
self,
db_name: str,
collection_name: str,
field_names: List[str] = ["text"],
separator: str = "",
query_dict: Optional[Dict] = None,
max_docs: int = 0,
metadata_names: Optional[List[str]] = None,
) -> Iterable[Document]:
"""Load data from the input directory.
Args:
db_name (str): name of the database.
collection_name (str): name of the collection.
field_names(List[str]): names of the fields to be concatenated.
Defaults to ["text"]
separator (str): separator to be used between fields.
Defaults to ""
query_dict (Optional[Dict]): query to filter documents. Read more
at [official docs](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/#std-label-method-find-query)
Defaults to None
max_docs (int): maximum number of documents to load.
Defaults to 0 (no limit)
metadata_names (Optional[List[str]]): names of the fields to be added
to the metadata attribute of the Document. Defaults to None
        Yields:
            Document: Documents loaded from the collection.
"""
db = self.client[db_name]
cursor = db[collection_name].find(
filter=query_dict or {},
limit=max_docs,
projection={name: 1 for name in field_names + (metadata_names or [])},
)
for item in cursor:
try:
texts = [str(item[name]) for name in field_names]
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
texts = self._flatten(texts)
text = separator.join(texts)
if metadata_names is None:
yield Document(text=text, id_=str(item["_id"]))
else:
try:
metadata = {name: item.get(name) for name in metadata_names}
metadata["collection"] = collection_name
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
yield Document(text=text, id_=str(item["_id"]), metadata=metadata)
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocList[nested_doc_cls].from_csv(tmp_file)
assert isinstance(da_from, DocList)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocList[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert isinstance(da, DocList)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocList.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocList[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
def test_from_remote_csv_file():
remote_url = 'https://github.com/docarray/docarray/blob/main/tests/toydata/books.csv?raw=true'
class Book(BaseDoc):
title: str
author: str
year: int
books = DocList[Book].from_csv(file_path=remote_url)
assert isinstance(books, DocList)
assert len(books) == 3
def test_doc_list_error(tmpdir):
class Book(BaseDoc):
title: str
    # not testing DocVec because it already fails here (as it should!)
docs = DocList([Book(title='hello'), Book(title='world')])
tmp_file = str(tmpdir / 'tmp.csv')
with pytest.raises(TypeError):
docs.to_csv(tmp_file)
def test_union_type_error(tmp_path):
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
docs.to_csv(str(tmp_path) + ".csv")
DocList[CustomDoc].from_csv(str(tmp_path) + ".csv")
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_basic.to_csv(str(tmp_path) + ".csv")
docs_copy = DocList[BasisUnion].from_csv(str(tmp_path) + ".csv")
assert docs_copy == docs_basic
def test_to_from_csv_docvec_raises():
class Book(BaseDoc):
title: str
author: str
year: int
books = DocVec[Book](
[Book(title='It\'s me, hi', author='I\'m the problem it\'s me', year=2022)]
)
with pytest.raises(NotImplementedError):
books.to_csv('dummy/file/path')
with pytest.raises(NotImplementedError):
DocVec[Book].from_csv('dummy/file/path')
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocList[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocList[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocList.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocList[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
def test_from_remote_csv_file():
remote_url = 'https://github.com/docarray/docarray/blob/main/tests/toydata/books.csv?raw=true'
class Book(BaseDoc):
title: str
author: str
year: int
books = DocList[Book].from_csv(file_path=remote_url)
assert len(books) == 3
def test_doc_list_error(tmpdir):
class Book(BaseDoc):
title: str
docs = DocList([Book(title='hello'), Book(title='world')])
tmp_file = str(tmpdir / 'tmp.csv')
with pytest.raises(TypeError):
docs.to_csv(tmp_file)
def test_union_type_error(tmp_path):
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
docs.to_csv(str(tmp_path) + ".csv")
DocList[CustomDoc].from_csv(str(tmp_path) + ".csv")
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_basic.to_csv(str(tmp_path) + ".csv")
docs_copy = DocList[BasisUnion].from_csv(str(tmp_path) + ".csv")
assert docs_copy == docs_basic
|
import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.transforms
return torchvision.transforms
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
use_v2=False,
):
module = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(module.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms.append(module.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
transforms.append(module.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
transforms.append(module.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
transforms.append(module.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
transforms.append(module.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = module.AutoAugmentPolicy(auto_augment_policy)
transforms.append(module.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
transforms.append(module.PILToTensor())
transforms.extend(
[
module.ConvertImageDtype(torch.float),
module.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
transforms.append(module.RandomErasing(p=random_erase_prob))
self.transforms = module.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
use_v2=False,
):
module = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(module.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms += [
module.Resize(resize_size, interpolation=interpolation, antialias=True),
module.CenterCrop(crop_size),
]
if backend == "pil":
transforms.append(module.PILToTensor())
transforms += [
module.ConvertImageDtype(torch.float),
module.Normalize(mean=mean, std=std),
]
self.transforms = module.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
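
if __name__ == "__main__":
    # Usage sketch: build a v2 training preset with illustrative values and run
    # it on a dummy PIL image (requires a torchvision build that ships
    # transforms.v2).
    from PIL import Image

    preset = ClassificationPresetTrain(crop_size=224, use_v2=True)
    out = preset(Image.new("RGB", (256, 256)))
    print(out.shape)  # torch.Size([3, 224, 224])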
|
import torch
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
):
trans = []
backend = backend.lower()
if backend == "tensor":
trans.append(transforms.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
trans.append(transforms.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
trans.append(transforms.PILToTensor())
trans.extend(
[
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
trans.append(transforms.RandomErasing(p=random_erase_prob))
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
):
trans = []
backend = backend.lower()
if backend == "tensor":
trans.append(transforms.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
trans += [
transforms.Resize(resize_size, interpolation=interpolation, antialias=True),
transforms.CenterCrop(crop_size),
]
if backend == "pil":
trans.append(transforms.PILToTensor())
trans += [
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
|
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
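
# Note (an explanatory sketch, not part of the tests): the "protocol://member::outer"
# paths used in test_fs_isfile are standard fsspec URL chaining, e.g.
#     fsspec.get_fs_token_paths("zip://dataset.jsonl::/tmp/archive.zip")
# opens the outer archive with the zip filesystem and addresses the member file inside it.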
|
"""**Chat Sessions** are a collection of messages and function calls."""
from collections.abc import Sequence
from typing import TypedDict
from langchain_core.messages import BaseMessage
class ChatSession(TypedDict, total=False):
"""Chat Session.
Chat Session represents a single conversation, channel, or other group of messages.
"""
messages: Sequence[BaseMessage]
"""A sequence of the LangChain chat messages loaded from the source."""
functions: Sequence[dict]
"""A sequence of the function calling specs for the messages."""
|
"""**Chat Sessions** are a collection of messages and function calls."""
from collections.abc import Sequence
from typing import TypedDict
from langchain_core.messages import BaseMessage
class ChatSession(TypedDict, total=False):
"""Chat Session represents a single
conversation, channel, or other group of messages.
"""
messages: Sequence[BaseMessage]
"""A sequence of the LangChain chat messages loaded from the source."""
functions: Sequence[dict]
"""A sequence of the function calling specs for the messages."""
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4),
'proposals_scores': rng.rand(2, )
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4),
'proposals_scores': rng.rand(2, )
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 2)
self.assertEqual(len(results['data_samples'].ignored_instances), 1)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
torch.Tensor)
self.assertIsInstance(results['data_samples'].proposals.scores,
torch.Tensor)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 3)
self.assertEqual(len(results['data_samples'].ignored_instances), 0)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
torch.Tensor)
self.assertIsInstance(results['data_samples'].proposals.scores,
torch.Tensor)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 2)
self.assertEqual(len(results['data_samples'].ignored_instances), 1)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 3)
self.assertEqual(len(results['data_samples'].ignored_instances), 0)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
import requests
from yarl import URL
from typing import Dict, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
JINA_SEARCH_URL_ENDPOINT = "https://s.jina.ai/"
class JinaToolSpec(BaseToolSpec):
"""
Jina tool spec.
"""
spec_functions = ["jina_search"]
    def _make_request(self, params: Dict) -> Dict:
"""
Make a request to the Jina Search API.
Args:
params (dict): The parameters to be passed to the API.
Returns:
            Dict: The parsed JSON response from the API.
"""
headers = {
"Accept": "application/json",
}
url = str(URL(JINA_SEARCH_URL_ENDPOINT + params.get("query")))
response = requests.get(url, headers=headers)
response.raise_for_status()
return response.json()
def jina_search(self, query: str) -> List[Document]:
"""
Make a query to the Jina Search engine to receive a list of results.
Args:
query (str): The query to be passed to Jina Search.
Returns:
            List[Document]: A list of documents containing search results.
"""
search_params = {
"query": query,
}
response = self._make_request(search_params)
return [
Document(
text=result["content"],
extra_info={
"url": result["url"],
"title": result["title"],
"description": result["description"],
},
)
for result in response["data"]
]
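# A minimal usage sketch (hypothetical; assumes llama-index is installed and
# the Jina Search endpoint is reachable -- the query string is illustrative):
#
#   tool_spec = JinaToolSpec()
#   documents = tool_spec.jina_search("vector databases")
#   for doc in documents:
#       print(doc.extra_info["url"], "-", doc.extra_info["title"])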
|
import requests
from yarl import URL
from typing import Dict, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
JINA_SEARCH_URL_ENDPOINT = "https://s.jina.ai/"
class JinaToolSpec(BaseToolSpec):
"""
Jina tool spec.
"""
spec_functions = ["jina_search"]
    def _make_request(self, params: Dict) -> Dict:
"""
Make a request to the Jina Search API.
Args:
params (dict): The parameters to be passed to the API.
Returns:
            Dict: The parsed JSON response from the API.
"""
headers = {
"Accept": "application/json",
}
url = str(URL(JINA_SEARCH_URL_ENDPOINT + params.get("query")))
response = requests.get(url, headers=headers)
response.raise_for_status()
return response.json()
def jina_search(self, query: str) -> List[Document]:
"""
Make a query to the Jina Search engine to receive a list of results.
Args:
query (str): The query to be passed to Jina Search.
Returns:
            List[Document]: A list of documents containing search results.
"""
search_params = {
"query": query,
}
response = self._make_request(search_params)
return [
Document(
text=result["content"],
extra_info={
"url": result["url"],
"title": result["title"],
"description": result["description"],
},
)
for result in response["data"]
]
|
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
if not isinstance(model, models.Model):
raise TypeError(
"The model must be a `keras.Model`. "
f"Received: model={model} of the type {type(model)}"
)
if not model.built:
raise ValueError(
"The model provided has not yet been built. It must be built "
"before export."
)
if isinstance(model, (models.Functional, models.Sequential)):
input_signature = tree.map_structure(make_input_spec, model.inputs)
if isinstance(input_signature, list) and len(input_signature) > 1:
input_signature = [input_signature]
else:
input_signature = _infer_input_signature_from_model(model)
if not input_signature or not model._called:
raise ValueError(
"The model provided has never called. "
"It must be called at least once before export."
)
return input_signature
def _infer_input_signature_from_model(model):
shapes_dict = getattr(model, "_build_shapes_dict", None)
if not shapes_dict:
return None
def _make_input_spec(structure):
# We need to turn wrapper structures like TrackingDict or _DictWrapper
# into plain Python structures because they don't work with jax2tf/JAX.
if isinstance(structure, dict):
return {k: _make_input_spec(v) for k, v in structure.items()}
elif isinstance(structure, tuple):
if all(isinstance(d, (int, type(None))) for d in structure):
return layers.InputSpec(
shape=(None,) + structure[1:], dtype=model.input_dtype
)
return tuple(_make_input_spec(v) for v in structure)
elif isinstance(structure, list):
if all(isinstance(d, (int, type(None))) for d in structure):
return layers.InputSpec(
shape=[None] + structure[1:], dtype=model.input_dtype
)
return [_make_input_spec(v) for v in structure]
else:
raise ValueError(
f"Unsupported type {type(structure)} for {structure}"
)
return [_make_input_spec(value) for value in shapes_dict.values()]
def make_input_spec(x):
if isinstance(x, layers.InputSpec):
if x.shape is None or x.dtype is None:
raise ValueError(
f"The `shape` and `dtype` must be provided. Received: x={x}"
)
input_spec = x
elif isinstance(x, backend.KerasTensor):
shape = (None,) + backend.standardize_shape(x.shape)[1:]
dtype = backend.standardize_dtype(x.dtype)
input_spec = layers.InputSpec(dtype=dtype, shape=shape, name=x.name)
elif backend.is_tensor(x):
shape = (None,) + backend.standardize_shape(x.shape)[1:]
dtype = backend.standardize_dtype(x.dtype)
input_spec = layers.InputSpec(dtype=dtype, shape=shape, name=None)
else:
raise TypeError(
f"Unsupported x={x} of the type ({type(x)}). Supported types are: "
"`keras.InputSpec`, `keras.KerasTensor` and backend tensor."
)
return input_spec
def make_tf_tensor_spec(x):
if isinstance(x, tf.TensorSpec):
tensor_spec = x
else:
input_spec = make_input_spec(x)
tensor_spec = tf.TensorSpec(
input_spec.shape, dtype=input_spec.dtype, name=input_spec.name
)
return tensor_spec
def convert_spec_to_tensor(spec, replace_none_number=None):
shape = backend.standardize_shape(spec.shape)
if replace_none_number is not None:
replace_none_number = int(replace_none_number)
shape = tuple(
s if s is not None else replace_none_number for s in shape
)
return ops.ones(shape, spec.dtype)
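# A minimal sketch of how these helpers compose (assumptions: a built
# functional model; layer sizes are illustrative):
#
#   inputs = layers.Input(shape=(4,))
#   model = models.Model(inputs, layers.Dense(2)(inputs))
#   specs = get_input_signature(model)   # [InputSpec(shape=(None, 4), ...)]
#   dummy = convert_spec_to_tensor(specs[0], replace_none_number=1)
#   assert tuple(dummy.shape) == (1, 4)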
|
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
if not isinstance(model, models.Model):
raise TypeError(
"The model must be a `keras.Model`. "
f"Received: model={model} of the type {type(model)}"
)
if not model.built:
raise ValueError(
"The model provided has not yet been built. It must be built "
"before export."
)
if isinstance(model, (models.Functional, models.Sequential)):
input_signature = tree.map_structure(make_input_spec, model.inputs)
if isinstance(input_signature, list) and len(input_signature) > 1:
input_signature = [input_signature]
else:
input_signature = _infer_input_signature_from_model(model)
if not input_signature or not model._called:
raise ValueError(
"The model provided has never called. "
"It must be called at least once before export."
)
return input_signature
def _infer_input_signature_from_model(model):
shapes_dict = getattr(model, "_build_shapes_dict", None)
if not shapes_dict:
return None
def _make_input_spec(structure):
# We need to turn wrapper structures like TrackingDict or _DictWrapper
# into plain Python structures because they don't work with jax2tf/JAX.
if isinstance(structure, dict):
return {k: _make_input_spec(v) for k, v in structure.items()}
elif isinstance(structure, tuple):
if all(isinstance(d, (int, type(None))) for d in structure):
return layers.InputSpec(
shape=(None,) + structure[1:], dtype=model.input_dtype
)
return tuple(_make_input_spec(v) for v in structure)
elif isinstance(structure, list):
if all(isinstance(d, (int, type(None))) for d in structure):
return layers.InputSpec(
shape=[None] + structure[1:], dtype=model.input_dtype
)
return [_make_input_spec(v) for v in structure]
else:
raise ValueError(
f"Unsupported type {type(structure)} for {structure}"
)
return [_make_input_spec(value) for value in shapes_dict.values()]
def make_input_spec(x):
if isinstance(x, layers.InputSpec):
if x.shape is None or x.dtype is None:
raise ValueError(
"The `shape` and `dtype` must be provided. " f"Received: x={x}"
)
input_spec = x
elif isinstance(x, backend.KerasTensor):
shape = (None,) + backend.standardize_shape(x.shape)[1:]
dtype = backend.standardize_dtype(x.dtype)
input_spec = layers.InputSpec(dtype=dtype, shape=shape, name=x.name)
elif backend.is_tensor(x):
shape = (None,) + backend.standardize_shape(x.shape)[1:]
dtype = backend.standardize_dtype(x.dtype)
input_spec = layers.InputSpec(dtype=dtype, shape=shape, name=None)
else:
raise TypeError(
f"Unsupported x={x} of the type ({type(x)}). Supported types are: "
"`keras.InputSpec`, `keras.KerasTensor` and backend tensor."
)
return input_spec
def make_tf_tensor_spec(x):
if isinstance(x, tf.TensorSpec):
tensor_spec = x
else:
input_spec = make_input_spec(x)
tensor_spec = tf.TensorSpec(
input_spec.shape, dtype=input_spec.dtype, name=input_spec.name
)
return tensor_spec
def convert_spec_to_tensor(spec, replace_none_number=None):
shape = backend.standardize_shape(spec.shape)
if replace_none_number is not None:
replace_none_number = int(replace_none_number)
shape = tuple(
s if s is not None else replace_none_number for s in shape
)
return ops.ones(shape, spec.dtype)
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.runtime_args.port[0]
self.grpc_port = self.runtime_args.port[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
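# A minimal usage sketch (hypothetical; port numbers are illustrative, and the
# Flow must expose one port per protocol in the same order the gateway reads
# them from runtime_args.port):
#
#   from jina import Flow
#
#   flow = Flow().config_gateway(
#       uses=MultiProtocolGateway,
#       port=[12345, 12346],          # http_port, grpc_port
#       protocol=['http', 'grpc'],
#   )
#   with flow:
#       flow.block()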
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.runtime_args.port[0]
self.grpc_port = self.runtime_args.port[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def teardown(self):
await super().teardown()
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
async def stop_server(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
@property
def should_exit(self) -> bool:
return self.http_server.should_exit
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .reid_metric import ReIDMetrics
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric'
]
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch dots1 model."""
import gc
import unittest
import pytest
from transformers import AutoTokenizer, Dots1Config, is_torch_available
from transformers.testing_utils import (
backend_empty_cache,
cleanup,
require_flash_attn,
require_torch,
require_torch_accelerator,
require_torch_gpu,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
Dots1ForCausalLM,
Dots1Model,
)
class Dots1ModelTester(CausalLMModelTester):
config_class = Dots1Config
if is_torch_available():
base_model_class = Dots1Model
causal_lm_class = Dots1ForCausalLM
def __init__(
self,
parent,
n_routed_experts=8,
n_shared_experts=1,
n_group=1,
topk_group=1,
num_experts_per_tok=8,
):
super().__init__(parent=parent, num_experts_per_tok=num_experts_per_tok)
self.n_routed_experts = n_routed_experts
self.n_shared_experts = n_shared_experts
self.n_group = n_group
self.topk_group = topk_group
@require_torch
class Dots1ModelTest(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(
Dots1Model,
Dots1ForCausalLM,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": Dots1Model,
"text-generation": Dots1ForCausalLM,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
model_tester_class = Dots1ModelTester
@unittest.skip("dots.llm1's moe is not compatible `token_indices, weight_indices = torch.where(mask)`.")
def test_generate_with_static_cache(self):
pass
@unittest.skip("dots.llm1's moe is not compatible `token_indices, weight_indices = torch.where(mask)`.")
def test_generate_compilation_all_outputs(self):
pass
@unittest.skip("dots.llm1's moe is not compatible `token_indices, weight_indices = torch.where(mask)`")
def test_generate_compile_model_forward(self):
pass
@unittest.skip("dots.llm1's moe is not compatible token_indices, weight_indices = torch.where(mask).")
def test_generate_from_inputs_embeds_with_static_cache(self):
pass
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="dots.llm1 flash attention does not support right padding")
@require_torch_accelerator
class Dots1IntegrationTest(unittest.TestCase):
    # This variable is used to determine which CUDA device we are using for our runners (A10 or T4).
    # Depending on the hardware, we get different logits / generations.
cuda_compute_capability_major_version = None
@classmethod
def setUpClass(cls):
if is_torch_available() and torch.cuda.is_available():
# 8 is for A100 / A10 and 7 for T4
cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]
def tearDown(self):
# See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed.
cleanup(torch_device, gc_collect=False)
@slow
def test_model_15b_a2b_generation(self):
EXPECTED_TEXT_COMPLETION = (
"""To be or not to be, that is the question:\nWhether 'tis nobler in the mind to suffer\nThe"""
)
prompt = "To be or not to"
tokenizer = AutoTokenizer.from_pretrained("redmoe-ai-v1/dots.llm1.test", use_fast=False)
model = Dots1ForCausalLM.from_pretrained("redmoe-ai-v1/dots.llm1.test", device_map="auto")
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, do_sample=False)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
del model
backend_empty_cache(torch_device)
gc.collect()
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch dots1 model."""
import gc
import unittest
import pytest
from transformers import AutoTokenizer, Dots1Config, is_torch_available
from transformers.testing_utils import (
backend_empty_cache,
cleanup,
require_flash_attn,
require_torch,
require_torch_accelerator,
require_torch_gpu,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
Dots1ForCausalLM,
Dots1Model,
)
class Dots1ModelTester(CausalLMModelTester):
config_class = Dots1Config
if is_torch_available():
base_model_class = Dots1Model
causal_lm_class = Dots1ForCausalLM
def __init__(
self,
parent,
n_routed_experts=8,
n_shared_experts=1,
n_group=1,
topk_group=1,
num_experts_per_tok=8,
):
super().__init__(parent=parent, num_experts_per_tok=num_experts_per_tok)
self.n_routed_experts = n_routed_experts
self.n_shared_experts = n_shared_experts
self.n_group = n_group
self.topk_group = topk_group
@require_torch
class Dots1ModelTest(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(
Dots1Model,
Dots1ForCausalLM,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": Dots1Model,
"text-generation": Dots1ForCausalLM,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
model_tester_class = Dots1ModelTester
@unittest.skip("dots.llm1's moe is not compatible `token_indices, weight_indices = torch.where(mask)`.")
def test_generate_compilation_all_outputs(self):
pass
@unittest.skip("dots.llm1's moe is not compatible `token_indices, weight_indices = torch.where(mask)`")
def test_generate_compile_model_forward(self):
pass
@unittest.skip("dots.llm1's moe is not compatible token_indices, weight_indices = torch.where(mask).")
def test_generate_from_inputs_embeds_with_static_cache(self):
pass
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="dots.llm1 flash attention does not support right padding")
@require_torch_accelerator
class Dots1IntegrationTest(unittest.TestCase):
    # This variable is used to determine which CUDA device we are using for our runners (A10 or T4).
    # Depending on the hardware, we get different logits / generations.
cuda_compute_capability_major_version = None
@classmethod
def setUpClass(cls):
if is_torch_available() and torch.cuda.is_available():
# 8 is for A100 / A10 and 7 for T4
cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]
def tearDown(self):
# See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed.
cleanup(torch_device, gc_collect=False)
@slow
def test_model_15b_a2b_generation(self):
EXPECTED_TEXT_COMPLETION = (
"""To be or not to be, that is the question:\nWhether 'tis nobler in the mind to suffer\nThe"""
)
prompt = "To be or not to"
tokenizer = AutoTokenizer.from_pretrained("redmoe-ai-v1/dots.llm1.test", use_fast=False)
model = Dots1ForCausalLM.from_pretrained("redmoe-ai-v1/dots.llm1.test", device_map="auto")
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, do_sample=False)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
del model
backend_empty_cache(torch_device)
gc.collect()
|
"""
This module provides dynamic access to deprecated Jira tools.
When attributes like `JiraAction` are accessed, they are redirected to their new
locations in `langchain_community.tools`. This ensures backward compatibility
while warning developers about deprecation.
Attributes:
JiraAction (deprecated): Dynamically loaded from langchain_community.tools.
"""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JiraAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"JiraAction": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""
Dynamically retrieve attributes from the updated module path.
Args:
name (str): The name of the attribute to import.
Returns:
Any: The resolved attribute from the updated path.
"""
return _import_attribute(name)
__all__ = [
"JiraAction",
]
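# Illustration of the redirect (the importing module path is hypothetical;
# accessing the name triggers __getattr__ above, which resolves it from
# langchain_community.tools and emits a deprecation warning):
#
#   from langchain.tools.jira.tool import JiraAction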
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JiraAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"JiraAction": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"JiraAction",
]
|
from pathlib import Path
from typing import List
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...dpr_text import DPRTextEncoder
@pytest.fixture(scope='session')
def basic_encoder() -> DPRTextEncoder:
return DPRTextEncoder()
@pytest.fixture(scope='session')
def basic_encoder_ctx() -> DPRTextEncoder:
return DPRTextEncoder(
'facebook/dpr-ctx_encoder-single-nq-base',
encoder_type='context',
title_tag_key='title',
)
def test_config():
encoder = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert encoder.default_batch_size == 32
assert encoder.default_traversal_paths == ('r',)
assert encoder.encoder_type == 'question'
assert encoder.title_tag_key is None
def test_no_document(basic_encoder: DPRTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_context_encoder_doc_no_title(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
with pytest.raises(ValueError, match='If you set `title_tag_key` property'):
basic_encoder_ctx.encode(docs, {})
def test_wrong_encoder_type():
with pytest.raises(ValueError, match='The ``encoder_type`` parameter'):
        encoder = DPRTextEncoder(encoder_type='wrong_type')
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
def test_encoding_question_type(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
def test_encoding_context_type(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there', tags={'title': 'greeting'})])
basic_encoder_ctx.encode(docs, {})
assert docs[0].embedding.shape == (768,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='GPU is needed for this test')
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
@pytest.mark.parametrize(
'traversal_path, counts',
[
('r', [['r', 1], ['c', 0], ['cc', 0]]),
('c', [['r', 0], ['c', 3], ['cc', 0]]),
('cc', [['r', 0], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_path: str, counts: List, basic_encoder: DPRTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(
docs=docs, parameters={'traversal_paths': [traversal_path]}, return_results=True
)
for path, count in counts:
assert len(docs.traverse_flat([path]).get_attributes('embedding')) == count
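# The traversal paths above address levels of the document tree built in the
# test: 'r' selects the root documents, 'c' their chunks, and 'cc' chunks of
# chunks, which is what the per-path embedding counts assert.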
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: DPRTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (768,)
def test_quality_embeddings(basic_encoder: DPRTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from typing import List
import pytest
import torch
from jina import Document, DocumentArray
from jina.executors import BaseExecutor
from ...dpr_text import DPRTextEncoder
@pytest.fixture(scope='session')
def basic_encoder() -> DPRTextEncoder:
return DPRTextEncoder()
@pytest.fixture(scope='session')
def basic_encoder_ctx() -> DPRTextEncoder:
return DPRTextEncoder(
'facebook/dpr-ctx_encoder-single-nq-base',
encoder_type='context',
title_tag_key='title',
)
def test_config():
encoder = BaseExecutor.load_config('../../config.yml')
assert encoder.default_batch_size == 32
assert encoder.default_traversal_paths == ('r',)
assert encoder.encoder_type == 'question'
assert encoder.title_tag_key is None
def test_no_document(basic_encoder: DPRTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_context_encoder_doc_no_title(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
with pytest.raises(ValueError, match='If you set `title_tag_key` property'):
basic_encoder_ctx.encode(docs, {})
def test_wrong_encoder_type():
with pytest.raises(ValueError, match='The ``encoder_type`` parameter'):
        encoder = DPRTextEncoder(encoder_type='wrong_type')
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
def test_encoding_question_type(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
def test_encoding_context_type(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there', tags={'title': 'greeting'})])
basic_encoder_ctx.encode(docs, {})
assert docs[0].embedding.shape == (768,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='GPU is needed for this test')
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
@pytest.mark.parametrize(
'traversal_path, counts',
[
('r', [['r', 1], ['c', 0], ['cc', 0]]),
('c', [['r', 0], ['c', 3], ['cc', 0]]),
('cc', [['r', 0], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_path: str, counts: List, basic_encoder: DPRTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(
docs=docs, parameters={'traversal_paths': [traversal_path]}, return_results=True
)
for path, count in counts:
assert len(docs.traverse_flat([path]).get_attributes('embedding')) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: DPRTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (768,)
def test_quality_embeddings(basic_encoder: DPRTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
import numpy as np
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.parametrize('dtype', [torch.int64, torch.float64, torch.int, torch.float])
def test_dtype(dtype):
tensor = torch.tensor([1, 2, 3], dtype=dtype)
assert TorchCompBackend.dtype(tensor) == dtype
def test_device():
tensor = torch.tensor([1, 2, 3])
assert TorchCompBackend.device(tensor) == 'cpu'
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
def test_empty_dtype():
tensor = TorchCompBackend.empty((10, 3), dtype=torch.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == torch.int32
def test_empty_device():
tensor = TorchCompBackend.empty((10, 3), device='meta')
assert tensor.shape == (10, 3)
assert tensor.device == torch.device('meta')
def test_squeeze():
tensor = torch.zeros(size=(1, 1, 3, 1))
squeezed = TorchCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
None,
torch.tensor([0, 2, 4, 6, 8, 10]),
),
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
(0, 10),
torch.tensor([0, 1, 2, 3, 4, 5]),
),
(
torch.tensor([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
torch.tensor([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = TorchCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert torch.allclose(output, result)
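# For reference, the mapping exercised above is plain min-max scaling (a
# sketch of the math these cases cover, not the backend implementation):
#
#   out = (x - x_min) / (x_max - x_min) * (t_max - t_min) + t_min
#
# where (x_min, x_max) falls back to the tensor's own extrema when x_range
# is None.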
def test_reshape():
a = torch.tensor([[[1, 2, 3], [4, 5, 6]]])
b = TorchCompBackend.reshape(a, (2, 3))
assert torch.equal(b, torch.tensor([[1, 2, 3], [4, 5, 6]]))
def test_copy():
a = torch.tensor([1, 2, 3])
b = TorchCompBackend.copy(a)
assert torch.equal(a, b)
def test_stack():
a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5, 6])
stacked = TorchCompBackend.stack([a, b], dim=0)
assert torch.equal(stacked, torch.tensor([[1, 2, 3], [4, 5, 6]]))
def test_empty_all():
shape = (2, 3)
dtype = torch.float32
device = 'cpu'
a = TorchCompBackend.empty(shape, dtype, device)
assert a.shape == shape and a.dtype == dtype and a.device.type == device
def test_to_numpy():
a = torch.tensor([1, 2, 3])
b = TorchCompBackend.to_numpy(a)
assert np.array_equal(b, np.array(a))
def test_none_value():
assert torch.isnan(TorchCompBackend.none_value())
def test_detach():
a = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
b = TorchCompBackend.detach(a)
assert not b.requires_grad
|
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.parametrize('dtype', [torch.int64, torch.float64, torch.int, torch.float])
def test_dtype(dtype):
tensor = torch.tensor([1, 2, 3], dtype=dtype)
assert TorchCompBackend.dtype(tensor) == dtype
def test_device():
tensor = torch.tensor([1, 2, 3])
assert TorchCompBackend.device(tensor) == 'cpu'
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
def test_empty_dtype():
tensor = TorchCompBackend.empty((10, 3), dtype=torch.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == torch.int32
def test_empty_device():
tensor = TorchCompBackend.empty((10, 3), device='meta')
assert tensor.shape == (10, 3)
assert tensor.device == torch.device('meta')
def test_squeeze():
tensor = torch.zeros(size=(1, 1, 3, 1))
squeezed = TorchCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
None,
torch.tensor([0, 2, 4, 6, 8, 10]),
),
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
(0, 10),
torch.tensor([0, 1, 2, 3, 4, 5]),
),
(
torch.tensor([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
torch.tensor([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = TorchCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert torch.allclose(output, result)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import (
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class FluxTransformer2DModelSingleFileTests(unittest.TestCase):
model_class = FluxTransformer2DModel
ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"
alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"]
repo_id = "black-forest-labs/FLUX.1-dev"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
backend_empty_cache(torch_device)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class FluxTransformer2DModelSingleFileTests(unittest.TestCase):
model_class = FluxTransformer2DModel
ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"
alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"]
repo_id = "black-forest-labs/FLUX.1-dev"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
torch.cuda.empty_cache()
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
torch.cuda.empty_cache()
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
from backend.blocks.smartlead.models import (
AddLeadsRequest,
AddLeadsToCampaignResponse,
CreateCampaignRequest,
CreateCampaignResponse,
SaveSequencesRequest,
SaveSequencesResponse,
)
from backend.util.request import Requests
class SmartLeadClient:
"""Client for the SmartLead API"""
    # This API requires your API key in the URL. DO NOT RAISE ERRORS FOR BAD REQUESTS.
# FILTER OUT THE API KEY FROM THE ERROR MESSAGE.
API_URL = "https://server.smartlead.ai/api/v1"
def __init__(self, api_key: str):
self.api_key = api_key
self.requests = Requests()
def _add_auth_to_url(self, url: str) -> str:
return f"{url}?api_key={self.api_key}"
def _handle_error(self, e: Exception) -> str:
return e.__str__().replace(self.api_key, "API KEY")
async def create_campaign(
self, request: CreateCampaignRequest
) -> CreateCampaignResponse:
try:
response = await self.requests.post(
self._add_auth_to_url(f"{self.API_URL}/campaigns/create"),
json=request.model_dump(),
)
response_data = response.json()
return CreateCampaignResponse(**response_data)
except ValueError as e:
raise ValueError(f"Invalid response format: {str(e)}")
except Exception as e:
raise ValueError(f"Failed to create campaign: {self._handle_error(e)}")
async def add_leads_to_campaign(
self, request: AddLeadsRequest
) -> AddLeadsToCampaignResponse:
try:
response = await self.requests.post(
self._add_auth_to_url(
f"{self.API_URL}/campaigns/{request.campaign_id}/leads"
),
json=request.model_dump(exclude={"campaign_id"}),
)
response_data = response.json()
response_parsed = AddLeadsToCampaignResponse(**response_data)
if not response_parsed.ok:
raise ValueError(
f"Failed to add leads to campaign: {response_parsed.error}"
)
return response_parsed
except ValueError as e:
raise ValueError(f"Invalid response format: {str(e)}")
except Exception as e:
raise ValueError(
f"Failed to add leads to campaign: {self._handle_error(e)}"
)
async def save_campaign_sequences(
self, campaign_id: int, request: SaveSequencesRequest
) -> SaveSequencesResponse:
"""
Save sequences within a campaign.
Args:
campaign_id: ID of the campaign to save sequences for
request: SaveSequencesRequest containing the sequences configuration
Returns:
SaveSequencesResponse with the result of the operation
Note:
For variant_distribution_type:
- MANUAL_EQUAL: Equally distributes variants across leads
- AI_EQUAL: Requires winning_metric_property and lead_distribution_percentage
- MANUAL_PERCENTAGE: Requires variant_distribution_percentage in seq_variants
"""
try:
response = await self.requests.post(
self._add_auth_to_url(
f"{self.API_URL}/campaigns/{campaign_id}/sequences"
),
json=request.model_dump(exclude_none=True),
)
return SaveSequencesResponse(**(response.json()))
except Exception as e:
raise ValueError(
f"Failed to save campaign sequences: {e.__str__().replace(self.api_key, 'API KEY')}"
)
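# A minimal usage sketch (hypothetical; assumes a valid SmartLead API key and
# that CreateCampaignRequest accepts these illustrative fields):
#
#   import asyncio
#
#   async def main():
#       client = SmartLeadClient(api_key="...")
#       campaign = await client.create_campaign(
#           CreateCampaignRequest(name="Demo campaign")
#       )
#       print(campaign)
#
#   asyncio.run(main())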
|
from backend.blocks.smartlead.models import (
AddLeadsRequest,
AddLeadsToCampaignResponse,
CreateCampaignRequest,
CreateCampaignResponse,
SaveSequencesRequest,
SaveSequencesResponse,
)
from backend.util.request import Requests
class SmartLeadClient:
"""Client for the SmartLead API"""
    # This API requires your API key in the URL. DO NOT RAISE ERRORS FOR BAD REQUESTS.
# FILTER OUT THE API KEY FROM THE ERROR MESSAGE.
API_URL = "https://server.smartlead.ai/api/v1"
def __init__(self, api_key: str):
self.api_key = api_key
self.requests = Requests()
def _add_auth_to_url(self, url: str) -> str:
return f"{url}?api_key={self.api_key}"
def _handle_error(self, e: Exception) -> str:
return e.__str__().replace(self.api_key, "API KEY")
def create_campaign(self, request: CreateCampaignRequest) -> CreateCampaignResponse:
try:
response = self.requests.post(
self._add_auth_to_url(f"{self.API_URL}/campaigns/create"),
json=request.model_dump(),
)
response_data = response.json()
return CreateCampaignResponse(**response_data)
except ValueError as e:
raise ValueError(f"Invalid response format: {str(e)}")
except Exception as e:
raise ValueError(f"Failed to create campaign: {self._handle_error(e)}")
def add_leads_to_campaign(
self, request: AddLeadsRequest
) -> AddLeadsToCampaignResponse:
try:
response = self.requests.post(
self._add_auth_to_url(
f"{self.API_URL}/campaigns/{request.campaign_id}/leads"
),
json=request.model_dump(exclude={"campaign_id"}),
)
response_data = response.json()
response_parsed = AddLeadsToCampaignResponse(**response_data)
if not response_parsed.ok:
raise ValueError(
f"Failed to add leads to campaign: {response_parsed.error}"
)
return response_parsed
except ValueError as e:
raise ValueError(f"Invalid response format: {str(e)}")
except Exception as e:
raise ValueError(
f"Failed to add leads to campaign: {self._handle_error(e)}"
)
def save_campaign_sequences(
self, campaign_id: int, request: SaveSequencesRequest
) -> SaveSequencesResponse:
"""
Save sequences within a campaign.
Args:
campaign_id: ID of the campaign to save sequences for
request: SaveSequencesRequest containing the sequences configuration
Returns:
SaveSequencesResponse with the result of the operation
Note:
For variant_distribution_type:
- MANUAL_EQUAL: Equally distributes variants across leads
- AI_EQUAL: Requires winning_metric_property and lead_distribution_percentage
- MANUAL_PERCENTAGE: Requires variant_distribution_percentage in seq_variants
"""
try:
response = self.requests.post(
self._add_auth_to_url(
f"{self.API_URL}/campaigns/{campaign_id}/sequences"
),
json=request.model_dump(exclude_none=True),
)
return SaveSequencesResponse(**response.json())
except Exception as e:
raise ValueError(
f"Failed to save campaign sequences: {e.__str__().replace(self.api_key, 'API KEY')}"
)
|
_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
|
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
|
import time
from jina import Flow
from tests.integration.instrumentation import ExecutorTestWithTracing, get_traces
def test_span_order(jaeger_port, otlp_collector, otlp_receiver_port):
f = Flow(
tracing=True,
traces_exporter_host='http://localhost',
traces_exporter_port=otlp_receiver_port,
).add(uses=ExecutorTestWithTracing)
with f:
from jina import DocumentArray
        f.post('/search', DocumentArray.empty(), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
traces = get_traces(jaeger_port, 'executor0/rep-0')
process_single_data_span_ids = set()
search_request_parent_span_ids = set()
for trace in traces:
for span in trace['spans']:
if (
span['operationName']
== '/jina.JinaSingleDataRequestRPC/process_single_data'
):
process_single_data_span_ids.add(span['spanID'])
if span['operationName'] == '/search':
references = span.get('references', [])
for ref in references:
search_request_parent_span_ids.add(ref.get('spanID', ''))
assert any(
[
search_span in process_single_data_span_ids
for search_span in search_request_parent_span_ids
]
)
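# The assertion above checks that at least one '/search' span lists a
# process_single_data RPC span among its parent references, i.e. the executor
# method call is nested inside the gRPC request span of the same trace.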
|
import time
from jina import Flow
from tests.integration.instrumentation import ExecutorTestWithTracing, get_traces
def test_span_order(jaeger_port, otlp_collector, otlp_receiver_port):
f = Flow(
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=otlp_receiver_port,
).add(uses=ExecutorTestWithTracing)
with f:
from jina import DocumentArray
        f.post('/search', DocumentArray.empty(), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
traces = get_traces(jaeger_port, 'executor0/rep-0')
process_single_data_span_ids = set()
search_request_parent_span_ids = set()
for trace in traces:
for span in trace['spans']:
if (
span['operationName']
== '/jina.JinaSingleDataRequestRPC/process_single_data'
):
process_single_data_span_ids.add(span['spanID'])
if span['operationName'] == '/search':
references = span.get('references', [])
for ref in references:
search_request_parent_span_ids.add(ref.get('spanID', ''))
assert any(
[
search_span in process_single_data_span_ids
for search_span in search_request_parent_span_ids
]
)
|
# Copyright 2024 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issue. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone
from github import Github
LABELS_TO_EXEMPT = [
"close-to-merge",
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/diffusers")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
labels = [label.name.lower() for label in issue.get_labels()]
if "stale" in labels:
comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
if last_comment is not None and last_comment.user.login != "github-actions[bot]":
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.now(timezone.utc) - issue.updated_at).days > 23
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label in LABELS_TO_EXEMPT for label in labels)
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored."
)
issue.add_to_labels("stale")
if __name__ == "__main__":
main()
|
# Copyright 2024 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issue. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/diffusers")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
labels = [label.name.lower() for label in issue.get_labels()]
if "stale" in labels:
comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
if last_comment is not None and last_comment.user.login != "github-actions[bot]":
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.now(timezone.utc) - issue.updated_at).days > 23
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label in LABELS_TO_EXEMPT for label in labels)
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored."
)
issue.add_to_labels("stale")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
"""Generate the conf JSONs from fairseq pretrained weight file, consumed by unit tests
Note:
The current configuration files were generated on fairseq e47a4c84
Usage:
1. Download pretrained parameters from https://github.com/pytorch/fairseq/tree/main/examples/hubert
2. Run this script and save the resulting JSON configuration in assets directory.
Example:
```
python generate_hubert_model_config.py \
--model-file hubert_base_ls960.pt \
> hubert_base_ls960.json
python generate_hubert_model_config.py \
--model-file hubert_large_ll60k.pt \
> hubert_large_ll60k.json
python generate_hubert_model_config.py \
--model-file hubert_large_ll60k_finetune_ls960.pt \
> hubert_large_ll60k_finetune_ls960.json
python generate_hubert_model_config.py \
--model-file hubert_xlarge_ll60k.pt \
        > hubert_xlarge_ll60k.json
python generate_hubert_model_config.py \
--model-file hubert_xlarge_ll60k_finetune_ls960.pt \
        > hubert_xlarge_ll60k_finetune_ls960.json
```
"""
import argparse
import json
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--model-file",
required=True,
help=("A pt file from " "https://github.com/pytorch/fairseq/tree/main/examples/hubert"),
)
return parser.parse_args()
def _load(model_file):
import fairseq
from omegaconf import OmegaConf
models, cfg, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
model = models[0]
cfg = OmegaConf.to_container(cfg)
return model, cfg
def _main():
args = _parse_args()
model, cfg = _load(args.model_file)
if model.__class__.__name__ == "HubertModel":
cfg["task"]["data"] = "/foo/bar"
cfg["task"]["label_dir"] = None
conf = {
"_name": "hubert",
"model": cfg["model"],
"task": cfg["task"],
"num_classes": model.num_classes,
}
elif model.__class__.__name__ == "HubertCtc":
conf = cfg["model"]
del conf["w2v_path"]
keep = ["_name", "task", "model"]
for key in conf["w2v_args"]:
if key not in keep:
del conf["w2v_args"][key]
conf["data"] = "/foo/bar/"
conf["w2v_args"]["task"]["data"] = "/foo/bar"
conf["w2v_args"]["task"]["labels"] = []
conf["w2v_args"]["task"]["label_dir"] = "/foo/bar"
print(json.dumps(conf, indent=4, sort_keys=True))
if __name__ == "__main__":
_main()
|
#!/usr/bin/env python3
"""Generate the conf JSONs from fairseq pretrained weight file, consumed by unit tests
Note:
The current configuration files were generated on fairseq e47a4c84
Usage:
1. Download pretrained parameters from https://github.com/pytorch/fairseq/tree/main/examples/hubert
2. Run this script and save the resulting JSON configuration in assets directory.
Example:
```
python generate_hubert_model_config.py \
--model-file hubert_base_ls960.pt \
> hubert_base_ls960.json
python generate_hubert_model_config.py \
--model-file hubert_large_ll60k.pt \
> hubert_large_ll60k.json
python generate_hubert_model_config.py \
--model-file hubert_large_ll60k_finetune_ls960.pt \
> hubert_large_ll60k_finetune_ls960.json
python generate_hubert_model_config.py \
--model-file hubert_xlarge_ll60k.pt \
        > hubert_xlarge_ll60k.json
python generate_hubert_model_config.py \
--model-file hubert_xlarge_ll60k_finetune_ls960.pt \
        > hubert_xlarge_ll60k_finetune_ls960.json
```
"""
import argparse
import json
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--model-file",
required=True,
help=("A pt file from " "https://github.com/pytorch/fairseq/tree/main/examples/hubert"),
)
return parser.parse_args()
def _load(model_file):
import fairseq
from omegaconf import OmegaConf
models, cfg, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
model = models[0]
cfg = OmegaConf.to_container(cfg)
return model, cfg
def _main():
args = _parse_args()
model, cfg = _load(args.model_file)
if model.__class__.__name__ == "HubertModel":
cfg["task"]["data"] = "/foo/bar"
cfg["task"]["label_dir"] = None
conf = {
"_name": "hubert",
"model": cfg["model"],
"task": cfg["task"],
"num_classes": model.num_classes,
}
elif model.__class__.__name__ == "HubertCtc":
conf = cfg["model"]
del conf["w2v_path"]
keep = ["_name", "task", "model"]
for key in list(k for k in conf["w2v_args"] if k not in keep):
del conf["w2v_args"][key]
conf["data"] = "/foo/bar/"
conf["w2v_args"]["task"]["data"] = "/foo/bar"
conf["w2v_args"]["task"]["labels"] = []
conf["w2v_args"]["task"]["label_dir"] = "/foo/bar"
print(json.dumps(conf, indent=4, sort_keys=True))
if __name__ == "__main__":
_main()
|
import sys
from jina.serve.runtimes.head import HeadRuntime
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
runtime_args.host = runtime_args.host[0]
runtime_args.port = runtime_args.port[0]
with HeadRuntime(runtime_args) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
import sys
from jina.serve.runtimes.head import HeadRuntime
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
with HeadRuntime(runtime_args) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, MinIoURandomCrop, MixUp, Mosaic, Normalize,
Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomErasing, RandomFlip, RandomShift, Resize,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
distance2bbox, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'CenterRegionAssigner',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'RegionAssigner'
]
|
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
distance2bbox, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'CenterRegionAssigner',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'RegionAssigner'
]
|
"""Filter that uses an LLM to drop documents that aren't relevant to the query."""
from collections.abc import Sequence
from typing import Any, Callable, Optional
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.runnables.config import RunnableConfig
from pydantic import ConfigDict
from langchain.chains import LLMChain
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: Runnable
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
config = RunnableConfig(callbacks=callbacks)
outputs = zip(
self.llm_chain.batch(
[self.get_input(query, doc) for doc in documents], config=config
),
documents,
)
for output_, doc in outputs:
include_doc = None
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
include_doc = self.llm_chain.prompt.output_parser.parse(output)
else:
if isinstance(output_, bool):
include_doc = output_
if include_doc:
filtered_docs.append(doc)
return filtered_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
config = RunnableConfig(callbacks=callbacks)
outputs = zip(
await self.llm_chain.abatch(
[self.get_input(query, doc) for doc in documents], config=config
),
documents,
)
for output_, doc in outputs:
include_doc = None
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
include_doc = self.llm_chain.prompt.output_parser.parse(output)
else:
if isinstance(output_, bool):
include_doc = output_
if include_doc:
filtered_docs.append(doc)
return filtered_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> "LLMChainFilter":
"""Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMChainFilter that uses the given language model.
"""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, **kwargs)
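# A hedged usage sketch (illustrative, not part of the module's public surface): a
# scripted fake LLM drives the default boolean prompt. `FakeListLLM` is the stock
# fake from langchain_core; the documents, query, and expected output are made up.
def _llm_chain_filter_demo() -> None:
    from langchain_core.language_models import FakeListLLM

    docs = [Document(page_content="cats purr"), Document(page_content="tax law")]
    llm = FakeListLLM(responses=["YES", "NO"])  # one scripted verdict per document
    doc_filter = LLMChainFilter.from_llm(llm)
    kept = doc_filter.compress_documents(docs, query="Why do cats purr?")
    # Expected (assuming responses are consumed in input order): only "cats purr" survives.
    print([d.page_content for d in kept])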
|
"""Filter that uses an LLM to drop documents that aren't relevant to the query."""
from collections.abc import Sequence
from typing import Any, Callable, Optional
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.runnables.config import RunnableConfig
from pydantic import ConfigDict
from langchain.chains import LLMChain
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: Runnable
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
config = RunnableConfig(callbacks=callbacks)
outputs = zip(
self.llm_chain.batch(
[self.get_input(query, doc) for doc in documents], config=config
),
documents,
)
for output_, doc in outputs:
include_doc = None
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
include_doc = self.llm_chain.prompt.output_parser.parse(output)
else:
if isinstance(output_, bool):
include_doc = output_
if include_doc:
filtered_docs.append(doc)
return filtered_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
config = RunnableConfig(callbacks=callbacks)
outputs = zip(
await self.llm_chain.abatch(
[self.get_input(query, doc) for doc in documents], config=config
),
documents,
)
for output_, doc in outputs:
include_doc = None
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
include_doc = self.llm_chain.prompt.output_parser.parse(output)
else:
if isinstance(output_, bool):
include_doc = output_
if include_doc:
filtered_docs.append(doc)
return filtered_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> "LLMChainFilter":
"""Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMChainFilter that uses the given language model.
"""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, **kwargs)
|
from ._hubert_loss import hubert_loss
__all__ = [
"hubert_loss",
"wav2vec2_loss",
]
|
from ._hubert_loss import hubert_loss
__all__ = [
"hubert_loss",
]
|
import os
import subprocess
import pytest
from xgboost import testing as tm
pytestmark = [
pytest.mark.skipif(**tm.no_dask()),
pytest.mark.skipif(**tm.no_dask_cuda()),
tm.timeout(60),
]
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training() -> None:
script = os.path.join(tm.demo_dir(__file__), "dask", "gpu_training.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.mgpu
def test_dask_sklearn_demo() -> None:
script = os.path.join(tm.demo_dir(__file__), "dask", "sklearn_gpu_training.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.mgpu
@pytest.mark.skipif(**tm.no_cupy())
def test_forward_logging_demo() -> None:
script = os.path.join(tm.demo_dir(__file__), "dask", "forward_logging.py")
cmd = ["python", script]
subprocess.check_call(cmd)
|
import os
import subprocess
import pytest
from xgboost import testing as tm
pytestmark = [
pytest.mark.skipif(**tm.no_dask()),
pytest.mark.skipif(**tm.no_dask_cuda()),
tm.timeout(60),
]
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training():
script = os.path.join(tm.demo_dir(__file__), "dask", "gpu_training.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.mgpu
def test_dask_sklearn_demo():
script = os.path.join(tm.demo_dir(__file__), "dask", "sklearn_gpu_training.py")
cmd = ["python", script]
subprocess.check_call(cmd)
@pytest.mark.mgpu
@pytest.mark.skipif(**tm.no_cupy())
def test_forward_logging_demo():
script = os.path.join(tm.demo_dir(__file__), "dask", "forward_logging.py")
cmd = ["python", script]
subprocess.check_call(cmd)
|
from dask.array.fft import * # noqa: F403
# dask.array.fft doesn't have __all__. If it is added, replace this with
#
# from dask.array.fft import __all__ as fft_all
_n = {}
exec('from dask.array.fft import *', _n)
for k in ("__builtins__", "Sequence", "annotations", "warnings"):
_n.pop(k, None)
fft_all = list(_n)
del _n, k
from ...common import _fft
from ..._internal import get_xp
import dask.array as da
fftfreq = get_xp(da)(_fft.fftfreq)
rfftfreq = get_xp(da)(_fft.rfftfreq)
__all__ = fft_all + ["fftfreq", "rfftfreq"]
_all_ignore = ["da", "fft_all", "get_xp", "warnings"]
|
from dask.array.fft import * # noqa: F403
# dask.array.fft doesn't have __all__. If it is added, replace this with
#
# from dask.array.fft import __all__ as fft_all
_n = {}
exec('from dask.array.fft import *', _n)
del _n['__builtins__']
fft_all = list(_n)
del _n
from ...common import _fft
from ..._internal import get_xp
import dask.array as da
fftfreq = get_xp(da)(_fft.fftfreq)
rfftfreq = get_xp(da)(_fft.rfftfreq)
__all__ = [elem for elem in fft_all if elem != "annotations"] + ["fftfreq", "rfftfreq"]
del get_xp
del da
del fft_all
del _fft
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests.chat_models import (
ChatModelUnitTests,
)
from langchain_groq import ChatGroq
class TestGroqStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatGroq
@property
def chat_model_params(self) -> dict:
return {"model": "llama-3.1-8b-instant"}
|
"""Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests.chat_models import (
ChatModelUnitTests,
)
from langchain_groq import ChatGroq
class TestGroqStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatGroq
@property
def chat_model_params(self) -> dict:
return {"model": "llama-3.1-8b-instant"}
|
from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
DEFAULT_USER_ID,
AITextGeneratorBlock().id,
{
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
DEFAULT_USER_ID,
AITextGeneratorBlock().id,
{"model": "gpt-4-turbo", "api_key": "owned_api_key"},
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # set the calendar to month 2, keeping the rest of the current timestamp
user_credit.time_now = lambda: datetime.now().replace(month=month2)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now().replace(month=month1)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now().replace(month=month2)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import UserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = UserCredit(REFILL_VALUE)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{"model": "gpt-4-turbo", "api_key": "owned_api_key"},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
month1 = datetime(2022, 1, 15)
month2 = datetime(2022, 2, 15)
user_credit.time_now = lambda: month2
month2credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: month1
month1credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: month2
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
# Clear all transactions within the month
await CreditTransaction.prisma().update_many(
where={
"userId": DEFAULT_USER_ID,
"createdAt": {
"gte": datetime(2022, 2, 1),
"lt": datetime(2022, 3, 1),
},
},
data={"isActive": False},
)
user_credit.time_now = lambda: datetime(2022, 2, 15)
balance = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from google.protobuf import __version__ as __pb__version__
from jina._docarray import docarray_v2 as is_docarray_v2
if __pb__version__.startswith('4'):
if is_docarray_v2:
from .docarray_v2.pb.jina_pb2_grpc import *
else:
from .docarray_v1.pb.jina_pb2_grpc import *
else:
if is_docarray_v2:
from .docarray_v2.pb2.jina_pb2_grpc import *
else:
from .docarray_v1.pb2.jina_pb2_grpc import *
|
from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from .pb.jina_pb2_grpc import *
else:
from .pb2.jina_pb2_grpc import *
|
from typing import Iterable, Dict, TYPE_CHECKING
import numpy as np
from docarray import DocumentArray
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.milvus.backend import (
_always_true_expr,
_ids_to_milvus_expr,
_batch_list,
)
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
def _get_doc_by_id(self, _id: str) -> 'Document':
        # delegates to the batch implementation
return self._get_docs_by_ids([_id])[0]
def _del_doc_by_id(self, _id: str):
        # delegates to the batch implementation
self._del_docs_by_ids([_id])
def _set_doc_by_id(self, _id: str, value: 'Document', **kwargs):
        # delegates to the batch implementation
self._set_docs_by_ids([_id], [value], None, **kwargs)
def _load_offset2ids(self):
if self._list_like:
collection = self._offset2id_collection
kwargs = self._update_kwargs_from_config('consistency_level', **dict())
with self.loaded_collection(collection):
res = collection.query(
expr=_always_true_expr('document_id'),
output_fields=['offset', 'document_id'],
**kwargs,
)
sorted_res = sorted(res, key=lambda k: int(k['offset']))
self._offset2ids = Offset2ID([r['document_id'] for r in sorted_res])
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
# delete old entries
self._clear_offset2ids_milvus()
# insert current entries
ids = self._offset2ids.ids
if not ids:
return
offsets = [str(i) for i in range(len(ids))]
dummy_vectors = [np.zeros(1) for _ in range(len(ids))]
collection = self._offset2id_collection
collection.insert([offsets, ids, dummy_vectors])
def _get_docs_by_ids(self, ids: 'Iterable[str]', **kwargs) -> 'DocumentArray':
if not ids:
return DocumentArray()
ids = list(ids)
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
with self.loaded_collection():
docs = DocumentArray()
for id_batch in _batch_list(ids, kwargs['batch_size']):
res = self._collection.query(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}',
output_fields=['serialized'],
**kwargs,
)
if not res:
raise KeyError(f'No documents found for ids {ids}')
docs.extend(self._docs_from_query_response(res))
# sort output docs according to input id sorting
id_to_index = {id_: i for i, id_ in enumerate(ids)}
return DocumentArray(sorted(docs, key=lambda d: id_to_index[d.id]))
def _del_docs_by_ids(self, ids: 'Iterable[str]', **kwargs) -> 'DocumentArray':
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
for id_batch in _batch_list(list(ids), kwargs['batch_size']):
self._collection.delete(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}', **kwargs
)
def _set_docs_by_ids(
self, ids, docs: 'Iterable[Document]', mismatch_ids: 'Dict', **kwargs
):
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
# delete old entries
for id_batch in _batch_list(list(ids), kwargs['batch_size']):
self._collection.delete(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}',
**kwargs,
)
for docs_batch in _batch_list(list(docs), kwargs['batch_size']):
# insert new entries
payload = self._docs_to_milvus_payload(docs_batch)
self._collection.insert(payload, **kwargs)
def _clear_storage(self):
self._collection.drop()
self._create_or_reuse_collection()
self._clear_offset2ids_milvus()
def _clear_offset2ids_milvus(self):
self._offset2id_collection.drop()
self._create_or_reuse_offset2id_collection()
|
from typing import Iterable, Dict, TYPE_CHECKING
import numpy as np
from docarray import DocumentArray
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.milvus.backend import (
_always_true_expr,
_ids_to_milvus_expr,
_batch_list,
)
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
def _get_doc_by_id(self, _id: str) -> 'Document':
        # delegates to the batch implementation
return self._get_docs_by_ids([_id])[0]
def _del_doc_by_id(self, _id: str):
        # delegates to the batch implementation
self._del_docs_by_ids([_id])
def _set_doc_by_id(self, _id: str, value: 'Document', **kwargs):
        # delegates to the batch implementation
self._set_docs_by_ids([_id], [value], None, **kwargs)
def _load_offset2ids(self):
if self._list_like:
collection = self._offset2id_collection
kwargs = self._update_kwargs_from_config('consistency_level', **dict())
with self.loaded_collection(collection):
res = collection.query(
expr=_always_true_expr('document_id'),
output_fields=['offset', 'document_id'],
**kwargs,
)
sorted_res = sorted(res, key=lambda k: int(k['offset']))
self._offset2ids = Offset2ID([r['document_id'] for r in sorted_res])
def _save_offset2ids(self):
if self._list_like:
# delete old entries
self._clear_offset2ids_milvus()
# insert current entries
ids = self._offset2ids.ids
if not ids:
return
offsets = [str(i) for i in range(len(ids))]
dummy_vectors = [np.zeros(1) for _ in range(len(ids))]
collection = self._offset2id_collection
collection.insert([offsets, ids, dummy_vectors])
def _get_docs_by_ids(self, ids: 'Iterable[str]', **kwargs) -> 'DocumentArray':
if not ids:
return DocumentArray()
ids = list(ids)
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
with self.loaded_collection():
docs = DocumentArray()
for id_batch in _batch_list(ids, kwargs['batch_size']):
res = self._collection.query(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}',
output_fields=['serialized'],
**kwargs,
)
if not res:
raise KeyError(f'No documents found for ids {ids}')
docs.extend(self._docs_from_query_response(res))
# sort output docs according to input id sorting
id_to_index = {id_: i for i, id_ in enumerate(ids)}
return DocumentArray(sorted(docs, key=lambda d: id_to_index[d.id]))
def _del_docs_by_ids(self, ids: 'Iterable[str]', **kwargs) -> 'DocumentArray':
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
for id_batch in _batch_list(list(ids), kwargs['batch_size']):
self._collection.delete(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}', **kwargs
)
def _set_docs_by_ids(
self, ids, docs: 'Iterable[Document]', mismatch_ids: 'Dict', **kwargs
):
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
# delete old entries
for id_batch in _batch_list(list(ids), kwargs['batch_size']):
self._collection.delete(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}',
**kwargs,
)
for docs_batch in _batch_list(list(docs), kwargs['batch_size']):
# insert new entries
payload = self._docs_to_milvus_payload(docs_batch)
self._collection.insert(payload, **kwargs)
def _clear_storage(self):
self._collection.drop()
self._create_or_reuse_collection()
self._clear_offset2ids_milvus()
def _clear_offset2ids_milvus(self):
self._offset2id_collection.drop()
self._create_or_reuse_offset2id_collection()
|
"""Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from pydantic import computed_field
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the response from an "old-fashioned" LLM that
generates regular text (not chat messages).
    This model is used internally by chat models and will eventually
be mapped to a more general `LLMResult` object, and then projected into
an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
    via callbacks). Please refer to the `AIMessage` and `LLMResult` schema documentation
for more information.
"""
def __init__(
self,
text: str = "",
generation_info: Optional[dict[str, Any]] = None,
**kwargs: Any,
):
"""Initialize a Generation."""
super().__init__(generation_info=generation_info, **kwargs)
self._text = text
# workaround for ChatGeneration so that we can use a computed field to populate
# the text field from the message content (parent class needs to have a property)
@computed_field # type: ignore[prop-decorator]
@property
def text(self) -> str:
"""The text contents of the output."""
return self._text
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"
"""Type is used exclusively for serialization purposes.
Set to "Generation" for this class."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default namespace is ["langchain", "schema", "output"].
"""
return ["langchain", "schema", "output"]
class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""
def __init__(
self,
text: str = "",
generation_info: Optional[dict[str, Any]] = None,
**kwargs: Any,
):
"""Initialize a GenerationChunk."""
super().__init__(text=text, generation_info=generation_info, **kwargs)
self._text = text
def __add__(self, other: GenerationChunk) -> GenerationChunk:
"""Concatenate two GenerationChunks."""
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return GenerationChunk(
text=self.text + other.text,
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
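# Hedged illustration of streaming concatenation (values invented):
#
#   chunk = GenerationChunk(text="Hel", generation_info={"model": "demo"})
#   chunk += GenerationChunk(text="lo")
#   chunk.text             # -> "Hello"
#   chunk.generation_info  # -> {"model": "demo"}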
|
"""Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the response from an "old-fashioned" LLM that
generates regular text (not chat messages).
    This model is used internally by chat models and will eventually
be mapped to a more general `LLMResult` object, and then projected into
an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
    via callbacks). Please refer to the `AIMessage` and `LLMResult` schema documentation
for more information.
"""
text: str
"""Generated text output."""
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"
"""Type is used exclusively for serialization purposes.
Set to "Generation" for this class."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default namespace is ["langchain", "schema", "output"].
"""
return ["langchain", "schema", "output"]
class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""
def __add__(self, other: GenerationChunk) -> GenerationChunk:
"""Concatenate two GenerationChunks."""
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return GenerationChunk(
text=self.text + other.text,
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .solo import SOLO
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=None,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=None,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_batch_norm, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint', 'ManagerMeta', 'ManagerMixin',
'set_multi_processing', 'has_batch_norm', 'is_abs'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_batch_norm, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .setup_env import set_multi_processing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint', 'ManagerMeta', 'ManagerMixin',
'set_multi_processing', 'has_batch_norm'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.0.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.0.1'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
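# Worked examples, following directly from the parsing rules above:
#   parse_version_info('0.0.1')  -> (0, 0, 1)
#   parse_version_info('1.3rc2') -> (1, 3, 'rc2')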
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.0.1'
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from keras.src import backend
from keras.src import ops
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_pooling_output_shape
from keras.src.utils import argument_validation
class BasePooling(Layer):
"""Base pooling layer."""
def __init__(
self,
pool_size,
strides,
pool_dimensions,
pool_mode="max",
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(name=name, **kwargs)
self.pool_size = argument_validation.standardize_tuple(
pool_size, pool_dimensions, "pool_size"
)
strides = pool_size if strides is None else strides
self.strides = argument_validation.standardize_tuple(
strides, pool_dimensions, "strides", allow_zero=True
)
self.pool_mode = pool_mode
self.padding = padding
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
self._build_at_init()
def call(self, inputs):
if self.pool_mode == "max":
return ops.max_pool(
inputs,
pool_size=self.pool_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
elif self.pool_mode == "average":
return ops.average_pool(
inputs,
pool_size=self.pool_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
else:
raise ValueError(
"`pool_mode` must be either 'max' or 'average'. Received: "
f"{self.pool_mode}."
)
def compute_output_shape(self, input_shape):
return compute_pooling_output_shape(
input_shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def get_config(self):
config = super().get_config()
config.update(
{
"pool_size": self.pool_size,
"padding": self.padding,
"strides": self.strides,
"data_format": self.data_format,
}
)
return config
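# Hedged sketch: the configured state is enough for static shape inference, e.g. a
# 2x2, stride-2 pool halving the spatial dims (assumes a channels_last data format):
#
#   pool = BasePooling(pool_size=2, strides=None, pool_dimensions=2)
#   pool.compute_output_shape((None, 28, 28, 3))  # -> (None, 14, 14, 3)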
|
from keras.src import backend
from keras.src import ops
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_pooling_output_shape
from keras.src.utils import argument_validation
class BasePooling(Layer):
"""Base pooling layer."""
def __init__(
self,
pool_size,
strides,
pool_dimensions,
pool_mode="max",
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(name=name, **kwargs)
self.pool_size = argument_validation.standardize_tuple(
pool_size, pool_dimensions, "pool_size"
)
strides = pool_size if strides is None else strides
self.strides = argument_validation.standardize_tuple(
strides, pool_dimensions, "strides", allow_zero=True
)
self.pool_mode = pool_mode
self.padding = padding
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
self.built = True
def call(self, inputs):
if self.pool_mode == "max":
return ops.max_pool(
inputs,
pool_size=self.pool_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
elif self.pool_mode == "average":
return ops.average_pool(
inputs,
pool_size=self.pool_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
else:
raise ValueError(
"`pool_mode` must be either 'max' or 'average'. Received: "
f"{self.pool_mode}."
)
def compute_output_shape(self, input_shape):
return compute_pooling_output_shape(
input_shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def get_config(self):
config = super().get_config()
config.update(
{
"pool_size": self.pool_size,
"padding": self.padding,
"strides": self.strides,
"data_format": self.data_format,
}
)
return config
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from ..utils import is_torchdynamo_compiling
try:
from kernels import (
Device,
LayerRepository,
register_kernel_mapping,
replace_kernel_forward_from_hub,
)
from kernels import (
use_kernel_forward_from_hub as original_use_kernel_forward_from_hub,
)
_hub_kernels_available = True
_KERNEL_MAPPING: dict[str, dict[Union[Device, str], LayerRepository]] = {
"MultiScaleDeformableAttention": {
"cuda": LayerRepository(
repo_id="kernels-community/deformable-detr",
layer_name="MultiScaleDeformableAttention",
)
},
"Llama4TextMoe": {
"cuda": LayerRepository(
# Move to kernels-community/moe once we release.
repo_id="kernels-community/moe",
layer_name="Llama4TextMoe",
)
},
"RMSNorm": {
"cuda": LayerRepository(
repo_id="kernels-community/triton-layer-norm",
layer_name="LlamaRMSNorm",
revision="pure-layer-test",
)
},
"MLP": {
"cuda": LayerRepository(
repo_id="medmekk/triton-llama-mlp",
layer_name="TritonLlamaMLP",
)
},
}
register_kernel_mapping(_KERNEL_MAPPING)
def use_kernel_forward_from_hub(*args, **kwargs):
"""
Expands `kernels`' `use_kernel_forward_from_hub` to NOT use a kernel at compile time. This should be removed
when `kernels` supports `torch.compile`.
If the layer has a `config` attribute, we can also set `config.disable_custom_kernels = True` to disable the
kernel.
"""
def decorator_with_compile_path(cls):
# Keeps a reference to the original forward method
original_forward = cls.forward
# Applies the original decorator
decorator = original_use_kernel_forward_from_hub(*args, **kwargs)
cls = decorator(cls)
# Replaces the kernel forward with a compile-friendly version
kernel_forward = cls.forward
def forward_with_compile_path(*forward_args, **forward_kwargs):
disable_custom_kernels = hasattr(cls, "config") and getattr(cls.config, "disable_custom_kernels", None)
if is_torchdynamo_compiling() or disable_custom_kernels:
return original_forward(*forward_args, **forward_kwargs)
else:
return kernel_forward(*forward_args, **forward_kwargs)
cls.forward = forward_with_compile_path
return cls
return decorator_with_compile_path
except ImportError:
    # Stub to make decorators in transformers work when `kernels`
# is not installed.
def use_kernel_forward_from_hub(*args, **kwargs):
def decorator(cls):
return cls
return decorator
class LayerRepository:
def __init__(self, *args, **kwargs):
raise RuntimeError("LayerRepository requires `kernels` to be installed. Run `pip install kernels`.")
def replace_kernel_forward_from_hub(*args, **kwargs):
raise RuntimeError(
"replace_kernel_forward_from_hub requires `kernels` to be installed. Run `pip install kernels`."
)
def register_kernel_mapping(*args, **kwargs):
raise RuntimeError("register_kernel_mapping requires `kernels` to be installed. Run `pip install kernels`.")
_hub_kernels_available = False
def is_hub_kernels_available():
return _hub_kernels_available
__all__ = [
"LayerRepository",
"is_hub_kernels_available",
"use_kernel_forward_from_hub",
"register_kernel_mapping",
"replace_kernel_forward_from_hub",
]
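# Hedged usage sketch (illustrative, not part of the original file): decorating
# a module so its forward can be swapped for the "RMSNorm" hub kernel mapped
# above; the layer body is a standard RMSNorm written just for this example.
import torch

@use_kernel_forward_from_hub("RMSNorm")
class SketchRMSNorm(torch.nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-6):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Normalize by the root mean square over the last dimension.
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states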
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
from ..utils import is_torchdynamo_compiling
try:
from kernels import (
Device,
LayerRepository,
register_kernel_mapping,
replace_kernel_forward_from_hub,
)
from kernels import (
use_kernel_forward_from_hub as original_use_kernel_forward_from_hub,
)
_hub_kernels_available = True
_KERNEL_MAPPING: Dict[str, Dict[Union[Device, str], LayerRepository]] = {
"MultiScaleDeformableAttention": {
"cuda": LayerRepository(
repo_id="kernels-community/deformable-detr",
layer_name="MultiScaleDeformableAttention",
)
},
"Llama4TextMoe": {
"cuda": LayerRepository(
# Move to kernels-community/moe once we release.
repo_id="kernels-community/moe",
layer_name="Llama4TextMoe",
)
},
"RMSNorm": {
"cuda": LayerRepository(
repo_id="kernels-community/triton-layer-norm",
layer_name="LlamaRMSNorm",
revision="pure-layer-test",
)
},
"MLP": {
"cuda": LayerRepository(
repo_id="medmekk/triton-llama-mlp",
layer_name="TritonLlamaMLP",
)
},
}
register_kernel_mapping(_KERNEL_MAPPING)
def use_kernel_forward_from_hub(*args, **kwargs):
"""
Expands `kernels`' `use_kernel_forward_from_hub` to NOT use a kernel at compile time. This should be removed
when `kernels` supports `torch.compile`.
If the layer has a `config` attribute, we can also set `config.disable_custom_kernels = True` to disable the
kernel.
"""
def decorator_with_compile_path(cls):
# Keeps a reference to the original forward method
original_forward = cls.forward
# Applies the original decorator
decorator = original_use_kernel_forward_from_hub(*args, **kwargs)
cls = decorator(cls)
# Replaces the kernel forward with a compile-friendly version
kernel_forward = cls.forward
def forward_with_compile_path(*forward_args, **forward_kwargs):
disable_custom_kernels = hasattr(cls, "config") and getattr(cls.config, "disable_custom_kernels", None)
if is_torchdynamo_compiling() or disable_custom_kernels:
return original_forward(*forward_args, **forward_kwargs)
else:
return kernel_forward(*forward_args, **forward_kwargs)
cls.forward = forward_with_compile_path
return cls
return decorator_with_compile_path
except ImportError:
    # Stub to make decorators in transformers work when `kernels`
# is not installed.
def use_kernel_forward_from_hub(*args, **kwargs):
def decorator(cls):
return cls
return decorator
class LayerRepository:
def __init__(self, *args, **kwargs):
raise RuntimeError("LayerRepository requires `kernels` to be installed. Run `pip install kernels`.")
def replace_kernel_forward_from_hub(*args, **kwargs):
raise RuntimeError(
"replace_kernel_forward_from_hub requires `kernels` to be installed. Run `pip install kernels`."
)
def register_kernel_mapping(*args, **kwargs):
raise RuntimeError("register_kernel_mapping requires `kernels` to be installed. Run `pip install kernels`.")
_hub_kernels_available = False
def is_hub_kernels_available():
return _hub_kernels_available
__all__ = [
"LayerRepository",
"is_hub_kernels_available",
"use_kernel_forward_from_hub",
"register_kernel_mapping",
"replace_kernel_forward_from_hub",
]
|
from docarray import BaseDocument
from docarray.typing import Mesh3DUrl
def test_set_mesh_url():
class MyDocument(BaseDocument):
mesh_url: Mesh3DUrl
d = MyDocument(mesh_url="https://jina.ai/mesh.obj")
assert isinstance(d.mesh_url, Mesh3DUrl)
assert d.mesh_url == "https://jina.ai/mesh.obj"
|
from docarray import Document
from docarray.typing import Mesh3DUrl
def test_set_mesh_url():
class MyDocument(Document):
mesh_url: Mesh3DUrl
d = MyDocument(mesh_url="https://jina.ai/mesh.obj")
assert isinstance(d.mesh_url, Mesh3DUrl)
assert d.mesh_url == "https://jina.ai/mesh.obj"
|
from collections.abc import Sequence
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM calls.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thoughts go here...
Follow up: what is the temperature in SF?
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thoughts go here...
So the final answer is: The temperature is 100 degrees
```
"""
followups: Sequence[str] = ("Follow up:", "Followup:")
finish_string: str = "So the final answer is: "
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
last_line = text.split("\n")[-1]
        if not any(follow in last_line for follow in self.followups):
if self.finish_string not in last_line:
msg = f"Could not parse output: {text}"
raise OutputParserException(msg)
return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)
after_colon = text.split(":")[-1].strip()
return AgentAction("Intermediate Answer", after_colon, text)
@property
def _type(self) -> str:
return "self_ask"
|
from collections.abc import Sequence
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM calls.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thoughts go here...
Follow up: what is the temperature in SF?
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thoughts go here...
So the final answer is: The temperature is 100 degrees
```
"""
followups: Sequence[str] = ("Follow up:", "Followup:")
finish_string: str = "So the final answer is: "
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
last_line = text.split("\n")[-1]
        if not any(follow in last_line for follow in self.followups):
if self.finish_string not in last_line:
raise OutputParserException(f"Could not parse output: {text}")
return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)
after_colon = text.split(":")[-1].strip()
return AgentAction("Intermediate Answer", after_colon, text)
@property
def _type(self) -> str:
return "self_ask"
|
# Whether to disable showing progress on compilation passes
# Need to add a new config otherwise will get a circular import if dynamo config is imported here
disable_progress = True
# If True this also shows the node names in each pass; for small models this is great, but for larger models it's quite noisy
verbose_progress = False
|
# Whether to disable showing progress on compilation passes
# Need to add a new config otherwise will get a circular import if dynamo config is imported here
disable_progress = True
# If True this also shows the node names in each pass; for small models this is great, but for larger models it's quite noisy
verbose_progress = False
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import tanh_shrink
|
import logging
from typing import Optional, cast
from autogpt_libs.supabase_integration_credentials_store.types import (
UserIntegrations,
UserMetadata,
UserMetadataRaw,
)
from fastapi import HTTPException
from prisma import Json
from prisma.models import User
from backend.data.db import prisma
from backend.util.encryption import JSONCryptor
logger = logging.getLogger(__name__)
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "[email protected]"
async def get_or_create_user(user_data: dict) -> User:
user_id = user_data.get("sub")
if not user_id:
raise HTTPException(status_code=401, detail="User ID not found in token")
user_email = user_data.get("email")
if not user_email:
raise HTTPException(status_code=401, detail="Email not found in token")
user = await prisma.user.find_unique(where={"id": user_id})
if not user:
user = await prisma.user.create(
data={
"id": user_id,
"email": user_email,
"name": user_data.get("user_metadata", {}).get("name"),
}
)
return User.model_validate(user)
async def get_user_by_id(user_id: str) -> Optional[User]:
user = await prisma.user.find_unique(where={"id": user_id})
return User.model_validate(user) if user else None
async def create_default_user() -> Optional[User]:
user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
if not user:
user = await prisma.user.create(
data={
"id": DEFAULT_USER_ID,
"email": "[email protected]",
"name": "Default User",
}
)
return User.model_validate(user)
async def get_user_metadata(user_id: str) -> UserMetadata:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
metadata = cast(UserMetadataRaw, user.metadata)
return UserMetadata.model_validate(metadata)
async def update_user_metadata(user_id: str, metadata: UserMetadata):
await User.prisma().update(
where={"id": user_id},
data={"metadata": Json(metadata.model_dump())},
)
async def get_user_integrations(user_id: str) -> UserIntegrations:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
encrypted_integrations = user.integrations
if not encrypted_integrations:
return UserIntegrations()
else:
return UserIntegrations.model_validate(
JSONCryptor().decrypt(encrypted_integrations)
)
async def update_user_integrations(user_id: str, data: UserIntegrations):
encrypted_data = JSONCryptor().encrypt(data.model_dump())
await User.prisma().update(
where={"id": user_id},
data={"integrations": encrypted_data},
)
async def migrate_and_encrypt_user_integrations():
"""Migrate integration credentials and OAuth states from metadata to integrations column."""
users = await User.prisma().find_many(
where={
"metadata": {
"path": ["integration_credentials"],
"not": Json({"a": "yolo"}), # bogus value works to check if key exists
} # type: ignore
}
)
logger.info(f"Migrating integration credentials for {len(users)} users")
for user in users:
raw_metadata = cast(UserMetadataRaw, user.metadata)
metadata = UserMetadata.model_validate(raw_metadata)
# Get existing integrations data
integrations = await get_user_integrations(user_id=user.id)
# Copy credentials and oauth states from metadata if they exist
if metadata.integration_credentials and not integrations.credentials:
integrations.credentials = metadata.integration_credentials
if metadata.integration_oauth_states:
integrations.oauth_states = metadata.integration_oauth_states
# Save to integrations column
await update_user_integrations(user_id=user.id, data=integrations)
# Remove from metadata
raw_metadata = dict(raw_metadata)
raw_metadata.pop("integration_credentials", None)
raw_metadata.pop("integration_oauth_states", None)
# Update metadata without integration data
await User.prisma().update(
where={"id": user.id},
data={"metadata": Json(raw_metadata)},
)
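# Illustrative sketch only (assumes a connected Prisma client and a decoded
# Supabase JWT payload carrying "sub" and "email" claims): how a request
# handler might resolve the authenticated user with the helpers above.
async def example_resolve_user(token_payload: dict) -> User:
    # get_or_create_user raises HTTPException(401) on missing claims.
    return await get_or_create_user(token_payload)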
|
from typing import Optional
from autogpt_libs.supabase_integration_credentials_store.types import UserMetadataRaw
from fastapi import HTTPException
from prisma import Json
from prisma.models import User
from backend.data.db import prisma
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "[email protected]"
async def get_or_create_user(user_data: dict) -> User:
user_id = user_data.get("sub")
if not user_id:
raise HTTPException(status_code=401, detail="User ID not found in token")
user_email = user_data.get("email")
if not user_email:
raise HTTPException(status_code=401, detail="Email not found in token")
user = await prisma.user.find_unique(where={"id": user_id})
if not user:
user = await prisma.user.create(
data={
"id": user_id,
"email": user_email,
"name": user_data.get("user_metadata", {}).get("name"),
}
)
return User.model_validate(user)
async def get_user_by_id(user_id: str) -> Optional[User]:
user = await prisma.user.find_unique(where={"id": user_id})
return User.model_validate(user) if user else None
async def create_default_user() -> Optional[User]:
user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
if not user:
user = await prisma.user.create(
data={
"id": DEFAULT_USER_ID,
"email": "[email protected]",
"name": "Default User",
}
)
return User.model_validate(user)
async def get_user_metadata(user_id: str) -> UserMetadataRaw:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
return (
UserMetadataRaw.model_validate(user.metadata)
if user.metadata
else UserMetadataRaw()
)
async def update_user_metadata(user_id: str, metadata: UserMetadataRaw):
await User.prisma().update(
where={"id": user_id},
data={"metadata": Json(metadata.model_dump())},
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.data_elements import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase):
def setUp(self) -> None:
register_all_modules()
model_cfg = get_detector_cfg(
'cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py')
backbone = dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch')
neck = dict(
type='FPN',
in_channels=[512],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=1)
model_cfg.backbone = ConfigDict(**backbone)
model_cfg.neck = ConfigDict(**neck)
model_cfg.bbox_head.num_feat_levels = 1
self.model_cfg = model_cfg
def test_init(self):
model = get_detector_cfg(
'cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py')
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.bbox_head is not None)
self.assertTrue(detector.backbone is not None)
self.assertTrue(not hasattr(detector, 'neck'))
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_loss_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
assert isinstance(losses, dict)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_predict_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_tensor_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
assert isinstance(batch_results, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase):
def setUp(self) -> None:
register_all_modules()
model_cfg = get_detector_cfg(
'cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py')
backbone = dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch')
neck = dict(
type='FPN',
in_channels=[512],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=1)
model_cfg.backbone = ConfigDict(**backbone)
model_cfg.neck = ConfigDict(**neck)
model_cfg.bbox_head.num_feat_levels = 1
self.model_cfg = model_cfg
def test_init(self):
model = get_detector_cfg(
'cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py')
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.bbox_head is not None)
self.assertTrue(detector.backbone is not None)
self.assertTrue(not hasattr(detector, 'neck'))
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_loss_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
assert isinstance(losses, dict)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_predict_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_tensor_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
assert isinstance(batch_results, tuple)
|
from __future__ import annotations
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain.agents import AgentOutputParser
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
# Define a class that parses output for conversational agents
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Attempts to parse the given text into an AgentAction or AgentFinish.
Raises:
OutputParserException if parsing fails.
"""
try:
# Attempt to parse the text into a structured format (assumed to be JSON
# stored as markdown)
response = parse_json_markdown(text)
# If the response contains an 'action' and 'action_input'
if "action" in response and "action_input" in response:
action, action_input = response["action"], response["action_input"]
# If the action indicates a final answer, return an AgentFinish
if action == "Final Answer":
return AgentFinish({"output": action_input}, text)
# Otherwise, return an AgentAction with the specified action and
# input
return AgentAction(action, action_input, text)
# If the necessary keys aren't present in the response, raise an
# exception
msg = f"Missing 'action' or 'action_input' in LLM output: {text}"
raise OutputParserException(msg)
except Exception as e:
# If any other exception is raised during parsing, also raise an
# OutputParserException
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@property
def _type(self) -> str:
return "conversational_chat"
|
from __future__ import annotations
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain.agents import AgentOutputParser
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
# Define a class that parses output for conversational agents
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Attempts to parse the given text into an AgentAction or AgentFinish.
Raises:
OutputParserException if parsing fails.
"""
try:
# Attempt to parse the text into a structured format (assumed to be JSON
# stored as markdown)
response = parse_json_markdown(text)
# If the response contains an 'action' and 'action_input'
if "action" in response and "action_input" in response:
action, action_input = response["action"], response["action_input"]
# If the action indicates a final answer, return an AgentFinish
if action == "Final Answer":
return AgentFinish({"output": action_input}, text)
else:
# Otherwise, return an AgentAction with the specified action and
# input
return AgentAction(action, action_input, text)
else:
# If the necessary keys aren't present in the response, raise an
# exception
msg = f"Missing 'action' or 'action_input' in LLM output: {text}"
raise OutputParserException(msg)
except Exception as e:
# If any other exception is raised during parsing, also raise an
# OutputParserException
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@property
def _type(self) -> str:
return "conversational_chat"
|
"""Test EdenAi's invoice parser Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from langchain_community.tools.edenai import EdenAiParsingInvoiceTool
def test_edenai_call() -> None:
"""Test simple call to edenai's invoice parser endpoint."""
invoice_parser = EdenAiParsingInvoiceTool(providers=["amazon"], language="en")
output = invoice_parser.invoke(
"https://app.edenai.run/assets/img/data_1.72e3bdcc.png"
)
assert invoice_parser.name == "edenai_invoice_parsing"
assert invoice_parser.feature == "ocr"
assert invoice_parser.subfeature == "invoice_parser"
assert isinstance(output, str)
|
"""Test EdenAi's invoice parser Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from langchain_community.tools.edenai import EdenAiParsingInvoiceTool
def test_edenai_call() -> None:
"""Test simple call to edenai's invoice parser endpoint."""
invoice_parser = EdenAiParsingInvoiceTool(providers=["amazon"], language="en") # type: ignore[call-arg]
output = invoice_parser.invoke(
"https://app.edenai.run/assets/img/data_1.72e3bdcc.png"
)
assert invoice_parser.name == "edenai_invoice_parsing"
assert invoice_parser.feature == "ocr"
assert invoice_parser.subfeature == "invoice_parser"
assert isinstance(output, str)
|
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from langchain_core.tools import BaseTool, BaseToolkit
from langchain_core.utils.pydantic import get_fields
from pydantic import model_validator
from langchain_community.tools.file_management.copy import CopyFileTool
from langchain_community.tools.file_management.delete import DeleteFileTool
from langchain_community.tools.file_management.file_search import FileSearchTool
from langchain_community.tools.file_management.list_dir import ListDirectoryTool
from langchain_community.tools.file_management.move import MoveFileTool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
_FILE_TOOLS: List[Type[BaseTool]] = [
CopyFileTool,
DeleteFileTool,
FileSearchTool,
MoveFileTool,
ReadFileTool,
WriteFileTool,
ListDirectoryTool,
]
_FILE_TOOLS_MAP: Dict[str, Type[BaseTool]] = {
get_fields(tool_cls)["name"].default: tool_cls for tool_cls in _FILE_TOOLS
}
class FileManagementToolkit(BaseToolkit):
"""Toolkit for interacting with local files.
*Security Notice*: This toolkit provides methods to interact with local files.
If providing this toolkit to an agent on an LLM, ensure you scope
the agent's permissions to only include the necessary permissions
to perform the desired operations.
By **default** the agent will have access to all files within
the root dir and will be able to Copy, Delete, Move, Read, Write
and List files in that directory.
Consider the following:
- Limit access to particular directories using `root_dir`.
- Use filesystem permissions to restrict access and permissions to only
the files and directories required by the agent.
- Limit the tools available to the agent to only the file operations
necessary for the agent's intended use.
- Sandbox the agent by running it in a container.
See https://python.langchain.com/docs/security for more information.
Parameters:
root_dir: Optional. The root directory to perform file operations.
If not provided, file operations are performed relative to the current
working directory.
selected_tools: Optional. The tools to include in the toolkit. If not
provided, all tools are included.
"""
root_dir: Optional[str] = None
"""If specified, all file operations are made relative to root_dir."""
selected_tools: Optional[List[str]] = None
"""If provided, only provide the selected tools. Defaults to all."""
@model_validator(mode="before")
@classmethod
def validate_tools(cls, values: dict) -> Any:
selected_tools = values.get("selected_tools") or []
for tool_name in selected_tools:
if tool_name not in _FILE_TOOLS_MAP:
raise ValueError(
f"File Tool of name {tool_name} not supported."
f" Permitted tools: {list(_FILE_TOOLS_MAP)}"
)
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
allowed_tools = self.selected_tools or _FILE_TOOLS_MAP
tools: List[BaseTool] = []
for tool in allowed_tools:
tool_cls = _FILE_TOOLS_MAP[tool]
tools.append(tool_cls(root_dir=self.root_dir))
return tools
__all__ = ["FileManagementToolkit"]
|
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from langchain_core.tools import BaseTool, BaseToolkit
from langchain_core.utils.pydantic import get_fields
from pydantic import model_validator
from langchain_community.tools.file_management.copy import CopyFileTool
from langchain_community.tools.file_management.delete import DeleteFileTool
from langchain_community.tools.file_management.file_search import FileSearchTool
from langchain_community.tools.file_management.list_dir import ListDirectoryTool
from langchain_community.tools.file_management.move import MoveFileTool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
_FILE_TOOLS: List[Type[BaseTool]] = [
CopyFileTool,
DeleteFileTool,
FileSearchTool,
MoveFileTool,
ReadFileTool,
WriteFileTool,
ListDirectoryTool,
]
_FILE_TOOLS_MAP: Dict[str, Type[BaseTool]] = {
get_fields(tool_cls)["name"].default: tool_cls for tool_cls in _FILE_TOOLS
}
class FileManagementToolkit(BaseToolkit):
"""Toolkit for interacting with local files.
*Security Notice*: This toolkit provides methods to interact with local files.
If providing this toolkit to an agent on an LLM, ensure you scope
the agent's permissions to only include the necessary permissions
to perform the desired operations.
By **default** the agent will have access to all files within
the root dir and will be able to Copy, Delete, Move, Read, Write
and List files in that directory.
Consider the following:
- Limit access to particular directories using `root_dir`.
- Use filesystem permissions to restrict access and permissions to only
the files and directories required by the agent.
- Limit the tools available to the agent to only the file operations
necessary for the agent's intended use.
- Sandbox the agent by running it in a container.
See https://python.langchain.com/docs/security for more information.
Parameters:
root_dir: Optional. The root directory to perform file operations.
If not provided, file operations are performed relative to the current
working directory.
selected_tools: Optional. The tools to include in the toolkit. If not
provided, all tools are included.
"""
root_dir: Optional[str] = None
"""If specified, all file operations are made relative to root_dir."""
selected_tools: Optional[List[str]] = None
"""If provided, only provide the selected tools. Defaults to all."""
@model_validator(mode="before")
@classmethod
def validate_tools(cls, values: dict) -> Any:
selected_tools = values.get("selected_tools") or []
for tool_name in selected_tools:
if tool_name not in _FILE_TOOLS_MAP:
raise ValueError(
f"File Tool of name {tool_name} not supported."
f" Permitted tools: {list(_FILE_TOOLS_MAP)}"
)
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
allowed_tools = self.selected_tools or _FILE_TOOLS_MAP
tools: List[BaseTool] = []
for tool in allowed_tools:
tool_cls = _FILE_TOOLS_MAP[tool]
tools.append(tool_cls(root_dir=self.root_dir)) # type: ignore[call-arg]
return tools
__all__ = ["FileManagementToolkit"]
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/3.x/'
files = sorted(glob.glob('../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/'
files = sorted(glob.glob('../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
|
# Optional list of dependencies required by the package
dependencies = ["torch"]
from torchvision.models import get_model_weights, get_weight
from torchvision.models.alexnet import alexnet
from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
from torchvision.models.densenet import densenet121, densenet161, densenet169, densenet201
from torchvision.models.efficientnet import (
efficientnet_b0,
efficientnet_b1,
efficientnet_b2,
efficientnet_b3,
efficientnet_b4,
efficientnet_b5,
efficientnet_b6,
efficientnet_b7,
efficientnet_v2_l,
efficientnet_v2_m,
efficientnet_v2_s,
)
from torchvision.models.googlenet import googlenet
from torchvision.models.inception import inception_v3
from torchvision.models.maxvit import maxvit_t
from torchvision.models.mnasnet import mnasnet0_5, mnasnet0_75, mnasnet1_0, mnasnet1_3
from torchvision.models.mobilenetv2 import mobilenet_v2
from torchvision.models.mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
from torchvision.models.optical_flow import raft_large, raft_small
from torchvision.models.regnet import (
regnet_x_16gf,
regnet_x_1_6gf,
regnet_x_32gf,
regnet_x_3_2gf,
regnet_x_400mf,
regnet_x_800mf,
regnet_x_8gf,
regnet_y_128gf,
regnet_y_16gf,
regnet_y_1_6gf,
regnet_y_32gf,
regnet_y_3_2gf,
regnet_y_400mf,
regnet_y_800mf,
regnet_y_8gf,
)
from torchvision.models.resnet import (
resnet101,
resnet152,
resnet18,
resnet34,
resnet50,
resnext101_32x8d,
resnext101_64x4d,
resnext50_32x4d,
wide_resnet101_2,
wide_resnet50_2,
)
from torchvision.models.segmentation import (
deeplabv3_mobilenet_v3_large,
deeplabv3_resnet101,
deeplabv3_resnet50,
fcn_resnet101,
fcn_resnet50,
lraspp_mobilenet_v3_large,
)
from torchvision.models.shufflenetv2 import (
shufflenet_v2_x0_5,
shufflenet_v2_x1_0,
shufflenet_v2_x1_5,
shufflenet_v2_x2_0,
)
from torchvision.models.squeezenet import squeezenet1_0, squeezenet1_1
from torchvision.models.swin_transformer import swin_b, swin_s, swin_t, swin_v2_b, swin_v2_s, swin_v2_t
from torchvision.models.vgg import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.video import (
mc3_18,
mvit_v1_b,
mvit_v2_s,
r2plus1d_18,
r3d_18,
s3d,
swin3d_b,
swin3d_s,
swin3d_t,
)
from torchvision.models.vision_transformer import vit_b_16, vit_b_32, vit_h_14, vit_l_16, vit_l_32
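# Hedged usage note: any entrypoint above can be loaded through torch.hub.
# Shown as comments because it downloads pretrained weights over the network:
# import torch
# model = torch.hub.load("pytorch/vision", "resnet18", weights="IMAGENET1K_V1")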
|
# Optional list of dependencies required by the package
dependencies = ["torch"]
from torchvision.models import get_model_weights, get_weight
from torchvision.models.alexnet import alexnet
from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
from torchvision.models.densenet import densenet121, densenet161, densenet169, densenet201
from torchvision.models.efficientnet import (
efficientnet_b0,
efficientnet_b1,
efficientnet_b2,
efficientnet_b3,
efficientnet_b4,
efficientnet_b5,
efficientnet_b6,
efficientnet_b7,
efficientnet_v2_l,
efficientnet_v2_m,
efficientnet_v2_s,
)
from torchvision.models.googlenet import googlenet
from torchvision.models.inception import inception_v3
from torchvision.models.maxvit import maxvit_t
from torchvision.models.mnasnet import mnasnet0_5, mnasnet0_75, mnasnet1_0, mnasnet1_3
from torchvision.models.mobilenetv2 import mobilenet_v2
from torchvision.models.mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
from torchvision.models.optical_flow import raft_large, raft_small
from torchvision.models.regnet import (
regnet_x_16gf,
regnet_x_1_6gf,
regnet_x_32gf,
regnet_x_3_2gf,
regnet_x_400mf,
regnet_x_800mf,
regnet_x_8gf,
regnet_y_128gf,
regnet_y_16gf,
regnet_y_1_6gf,
regnet_y_32gf,
regnet_y_3_2gf,
regnet_y_400mf,
regnet_y_800mf,
regnet_y_8gf,
)
from torchvision.models.resnet import (
resnet101,
resnet152,
resnet18,
resnet34,
resnet50,
resnext101_32x8d,
resnext101_64x4d,
resnext50_32x4d,
wide_resnet101_2,
wide_resnet50_2,
)
from torchvision.models.segmentation import (
deeplabv3_mobilenet_v3_large,
deeplabv3_resnet101,
deeplabv3_resnet50,
fcn_resnet101,
fcn_resnet50,
lraspp_mobilenet_v3_large,
)
from torchvision.models.shufflenetv2 import (
shufflenet_v2_x0_5,
shufflenet_v2_x1_0,
shufflenet_v2_x1_5,
shufflenet_v2_x2_0,
)
from torchvision.models.squeezenet import squeezenet1_0, squeezenet1_1
from torchvision.models.swin_transformer import swin_b, swin_s, swin_t, swin_v2_b, swin_v2_s, swin_v2_t
from torchvision.models.vgg import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.vision_transformer import vit_b_16, vit_b_32, vit_h_14, vit_l_16, vit_l_32
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.45"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
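# Minimal usage illustration (assumes a ./data directory plus LLM and embedding
# models configured via Settings; shown as comments since it performs I/O):
# documents = SimpleDirectoryReader("data").load_data()
# index = VectorStoreIndex.from_documents(documents)
# response = index.as_query_engine().query("What do these documents cover?")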
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.44"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .squim_pipeline import SQUIM_OBJECTIVE, SquimObjectiveBundle
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
"SQUIM_OBJECTIVE",
"SquimObjectiveBundle",
]
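# Usage illustration (downloads pretrained weights, hence comments only):
# model = SQUIM_OBJECTIVE.get_model()
# stoi, pesq, si_sdr = model(waveform)  # waveform: (batch, time) tensor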
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
]
|
from typing import Any
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
def arize_phoenix_callback_handler(**kwargs: Any) -> BaseCallbackHandler:
# newer versions of arize, v2.x
try:
from openinference.instrumentation.llama_index import LlamaIndexInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
OTLPSpanExporter,
)
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
endpoint = kwargs.get("endpoint", "http://127.0.0.1:6006/v1/traces")
tracer_provider = trace_sdk.TracerProvider()
tracer_provider.add_span_processor(
SimpleSpanProcessor(OTLPSpanExporter(endpoint))
)
return LlamaIndexInstrumentor().instrument(
tracer_provider=kwargs.get("tracer_provider", tracer_provider),
separate_trace_from_runtime_context=kwargs.get(
"separate_trace_from_runtime_context"
),
)
except ImportError:
# using an older version of arize
pass
# older versions of arize, v1.x
try:
from phoenix.trace.llama_index import OpenInferenceTraceCallbackHandler
except ImportError:
raise ImportError(
"Please install Arize Phoenix with `pip install -q arize-phoenix`"
)
return OpenInferenceTraceCallbackHandler(**kwargs)
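# Usage sketch (assumes the OpenInference/Phoenix packages are installed and a
# local Phoenix collector is running; illustrative only):
# handler = arize_phoenix_callback_handler(endpoint="http://127.0.0.1:6006/v1/traces")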
|
from typing import Any
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
def arize_phoenix_callback_handler(**kwargs: Any) -> BaseCallbackHandler:
# newer versions of arize, v2.x
try:
from openinference.instrumentation.llama_index import LlamaIndexInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
OTLPSpanExporter,
)
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
endpoint = kwargs.get("endpoint", "http://127.0.0.1:6006/v1/traces")
tracer_provider = trace_sdk.TracerProvider()
tracer_provider.add_span_processor(
SimpleSpanProcessor(OTLPSpanExporter(endpoint))
)
return LlamaIndexInstrumentor().instrument(
tracer_provider=kwargs.get("tracer_provider", tracer_provider)
)
except ImportError:
# using an older version of arize
pass
# older versions of arize, v1.x
try:
from phoenix.trace.llama_index import OpenInferenceTraceCallbackHandler
except ImportError:
raise ImportError(
"Please install Arize Phoenix with `pip install -q arize-phoenix`"
)
return OpenInferenceTraceCallbackHandler(**kwargs)
|
from typing import Any, List, Optional, Union
from pathlib import Path
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.core.utils import infer_torch_device
DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH = 512
dispatcher = get_dispatcher(__name__)
class SentenceTransformerRerank(BaseNodePostprocessor):
model: str = Field(description="Sentence transformer model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
device: str = Field(
default="cpu",
description="Device to use for sentence transformer.",
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
cross_encoder_kwargs: dict = Field(
default_factory=dict,
description="Additional keyword arguments for CrossEncoder initialization. "
"device and model should not be included here.",
)
_model: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "cross-encoder/stsb-distilroberta-base",
device: Optional[str] = None,
keep_retrieval_score: Optional[bool] = False,
cache_dir: Optional[Union[str, Path]] = None,
cross_encoder_kwargs: Optional[dict] = None,
):
try:
from sentence_transformers import CrossEncoder
except ImportError:
raise ImportError(
"Cannot import sentence-transformers or torch package,",
"please `pip install torch sentence-transformers`",
)
super().__init__(
top_n=top_n,
model=model,
device=device,
keep_retrieval_score=keep_retrieval_score,
cross_encoder_kwargs=cross_encoder_kwargs or {},
)
init_kwargs = self.cross_encoder_kwargs.copy()
if "device" in init_kwargs or "model" in init_kwargs:
raise ValueError(
"'device' and 'model' should not be specified in 'cross_encoder_kwargs'. "
"Use the top-level 'device' and 'model' parameters instead."
)
# Set default max_length if not provided by the user in kwargs.
if "max_length" not in init_kwargs:
init_kwargs["max_length"] = DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH
# Explicit arguments from the constructor take precedence over kwargs
resolved_device = infer_torch_device() if device is None else device
init_kwargs["device"] = resolved_device
if cache_dir:
init_kwargs["cache_dir"] = cache_dir
self._model = CrossEncoder(
model_name=model,
**init_kwargs,
)
@classmethod
def class_name(cls) -> str:
return "SentenceTransformerRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query_and_nodes = [
(
query_bundle.query_str,
node.node.get_content(metadata_mode=MetadataMode.EMBED),
)
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._model.predict(query_and_nodes)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = float(score)
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
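# Minimal sketch of reranking in isolation, assuming sentence-transformers is
# installed; the toy nodes below stand in for retriever output.
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
reranker = SentenceTransformerRerank(top_n=2)
nodes = [
    NodeWithScore(node=TextNode(text=t), score=0.5)
    for t in ("pizza is a food", "the sky is blue", "pasta is a food")
]
best = reranker.postprocess_nodes(nodes, query_bundle=QueryBundle("what is a food?"))
assert len(best) == 2  # top_n survivors, sorted by cross-encoder score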
|
from typing import Any, List, Optional, Union
from pathlib import Path
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.core.utils import infer_torch_device
DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH = 512
dispatcher = get_dispatcher(__name__)
class SentenceTransformerRerank(BaseNodePostprocessor):
model: str = Field(description="Sentence transformer model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
device: str = Field(
default="cpu",
description="Device to use for sentence transformer.",
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
_model: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "cross-encoder/stsb-distilroberta-base",
device: Optional[str] = None,
keep_retrieval_score: Optional[bool] = False,
cache_dir: Optional[Union[str, Path]] = None,
):
try:
from sentence_transformers import CrossEncoder
except ImportError:
raise ImportError(
"Cannot import sentence-transformers or torch package,",
"please `pip install torch sentence-transformers`",
)
super().__init__(
top_n=top_n,
model=model,
device=device,
keep_retrieval_score=keep_retrieval_score,
)
device = infer_torch_device() if device is None else device
self._model = CrossEncoder(
model,
max_length=DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH,
device=device,
cache_dir=cache_dir,
)
@classmethod
def class_name(cls) -> str:
return "SentenceTransformerRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query_and_nodes = [
(
query_bundle.query_str,
node.node.get_content(metadata_mode=MetadataMode.EMBED),
)
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._model.predict(query_and_nodes)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = float(score)
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
transforms (Sequence[dict | callable]): Sequence of transform objects or
    config dicts to be composed.
"""
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
data (dict): A result dict containing the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
str_ = t.__repr__()
if 'Compose(' in str_:
str_ = str_.replace('\n', '\n ')
format_string += '\n'
format_string += f' {str_}'
format_string += '\n)'
return format_string
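# Minimal sketch of the two behaviors __call__ implements: transforms run in
# order, and a None return short-circuits the pipeline. Plain callables are
# used so the example does not depend on any registered transform.
pipeline = Compose([
    lambda data: {**data, 'loaded': True},
    lambda data: data if data.get('keep', True) else None,
])
assert pipeline({'keep': True}) == {'keep': True, 'loaded': True}
assert pipeline({'keep': False}) is None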
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
transforms (Sequence[dict | callable]): Sequence of transform objects or
    config dicts to be composed.
"""
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
data (dict): A result dict containing the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += f' {t}'
format_string += '\n)'
return format_string
|
# Copyright (c) OpenMMLab. All rights reserved.
from .log_buffer import LogBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = ['LogBuffer', 'MessageHub', 'MMLogger', 'print_log']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_global_accsessible import BaseGlobalAccessible, MetaGlobalAccessible
from .log_buffer import LogBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = [
'LogBuffer', 'MessageHub', 'MetaGlobalAccessible', 'BaseGlobalAccessible',
'MMLogger', 'print_log'
]
|
"""Airtable reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from pyairtable import Table
class AirtableReader(BaseReader):
"""
Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable API key.
"""
def __init__(self, api_key: str) -> None:
"""Initialize Airtable reader."""
self.api_key = api_key
def load_data(self, base_id: str, table_id: str) -> List[Document]:
"""
Load data from a table in a base.
Args:
table_id (str): Table ID.
base_id (str): Base ID.
Returns:
List[Document]: List of documents.
"""
table = Table(self.api_key, base_id, table_id)
all_records = table.all()
return [Document(text=f"{all_records}", extra_info={})]
|
"""Airtable reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from pyairtable import Table
class AirtableReader(BaseReader):
"""
Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable API key.
"""
def __init__(self, api_key: str) -> None:
"""Initialize Airtable reader."""
self.api_key = api_key
def load_data(self, base_id: str, table_id: str) -> List[Document]:
"""
Load data from a table in a base.
Args:
table_id (str): Table ID.
base_id (str): Base ID.
Returns:
List[Document]: List of documents.
"""
table = Table(self.api_key, base_id, table_id)
all_records = table.all()
return [Document(text=f"{all_records}", extra_info={})]
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='LegacyDeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
dist_params = dict(backend='nccl', port=29555)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
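# Hedged sketch of what auto_scale_lr does when enabled: the configured lr
# (2e-3) is tied to base_batch_size = 64, and a different total batch size
# rescales it linearly; the 4-GPU setup below is hypothetical.
base_lr, base_batch_size = 2e-3, 64
actual_batch_size = 4 * 8  # hypothetical: 4 GPUs x 8 samples per GPU
scaled_lr = base_lr * actual_batch_size / base_batch_size  # linear scaling rule
assert abs(scaled_lr - 1e-3) < 1e-12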
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='LegacyDeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
dist_params = dict(backend='nccl', port=29555)
|
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Literal, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.
PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable. Defaults to True."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "schema", "prompt"].
"""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
"""String prompt value."""
text: str
"""Prompt text."""
type: Literal["StringPromptValue"] = "StringPromptValue"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "base"].
"""
return ["langchain", "prompts", "base"]
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
"""Chat prompt value.
A type of a prompt value that is built from messages.
"""
messages: Sequence[BaseMessage]
"""List of messages."""
def to_string(self) -> str:
"""Return prompt as string."""
return get_buffer_string(self.messages)
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of messages."""
return list(self.messages)
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "chat"].
"""
return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
"""Image URL."""
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image. Defaults to "auto".
Can be "auto", "low", or "high"."""
url: str
"""Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
"""Image prompt value."""
image_url: ImageURL
"""Image URL."""
type: Literal["ImagePromptValue"] = "ImagePromptValue"
def to_string(self) -> str:
"""Return prompt (image URL) as string."""
return self.image_url["url"]
def to_messages(self) -> list[BaseMessage]:
"""Return prompt (image URL) as messages."""
return [HumanMessage(content=[cast("dict", self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
"""Chat prompt value which explicitly lists out the message types it accepts.
For use in external schemas.
"""
messages: Sequence[AnyMessage]
"""Sequence of messages."""
type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "chat"].
"""
return ["langchain", "prompts", "chat"]
|
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Literal, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
class PromptValue(Serializable, ABC):
"""Base abstract class for inputs to any language model.
PromptValues can be converted to both LLM (pure text-generation) inputs and
ChatModel inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable. Defaults to True."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "schema", "prompt"].
"""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of Messages."""
class StringPromptValue(PromptValue):
"""String prompt value."""
text: str
"""Prompt text."""
type: Literal["StringPromptValue"] = "StringPromptValue"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "base"].
"""
return ["langchain", "prompts", "base"]
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as messages."""
return [HumanMessage(content=self.text)]
class ChatPromptValue(PromptValue):
"""Chat prompt value.
A type of a prompt value that is built from messages.
"""
messages: Sequence[BaseMessage]
"""List of messages."""
def to_string(self) -> str:
"""Return prompt as string."""
return get_buffer_string(self.messages)
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of messages."""
return list(self.messages)
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "chat"].
"""
return ["langchain", "prompts", "chat"]
class ImageURL(TypedDict, total=False):
"""Image URL."""
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image. Defaults to "auto".
Can be "auto", "low", or "high"."""
url: str
"""Either a URL of the image or the base64 encoded image data."""
class ImagePromptValue(PromptValue):
"""Image prompt value."""
image_url: ImageURL
"""Image URL."""
type: Literal["ImagePromptValue"] = "ImagePromptValue"
def to_string(self) -> str:
"""Return prompt (image URL) as string."""
return self.image_url["url"]
def to_messages(self) -> list[BaseMessage]:
"""Return prompt (image URL) as messages."""
return [HumanMessage(content=[cast(dict, self.image_url)])]
class ChatPromptValueConcrete(ChatPromptValue):
"""Chat prompt value which explicitly lists out the message types it accepts.
For use in external schemas.
"""
messages: Sequence[AnyMessage]
"""Sequence of messages."""
type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
This is used to determine the namespace of the object when serializing.
Defaults to ["langchain", "prompts", "chat"].
"""
return ["langchain", "prompts", "chat"]
|
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
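# Hedged sketch of how such include maps are typically consumed; prisma-client-py
# model-based access and a registered AgentGraph model are assumptions here.
import prisma.models
async def get_graph_with_nodes(graph_id: str):
    # include= asks Prisma to eagerly load the related records declared above
    return await prisma.models.AgentGraph.prisma().find_unique(
        where={"id": graph_id},
        include=AGENT_GRAPH_INCLUDE,
    )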
|
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
}
}
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
trained_index_file = os.path.join(tmpdir, 'faiss.index')
train_data = np.array(np.random.random([512, 10]), dtype=np.float32)
index_docs = _get_docs_from_vecs(train_data)
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF6,PQ2',
'trained_index_file': trained_index_file,
},
uses_meta=metas,
)
with f:
import faiss
faiss_index = faiss.index_factory(10, 'IVF6,PQ2', faiss.METRIC_INNER_PRODUCT)
faiss.normalize_L2(train_data)
faiss_index.train(train_data)
faiss.write_index(faiss_index, trained_index_file)
# train and index docs first
f.post(on='/index', data=index_docs)
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
train_data_file = os.path.join(os.environ['TEST_WORKSPACE'], 'train.npy')
train_data = np.array(np.random.random([1024, 10]), dtype=np.float32)
np.save(train_data_file, train_data)
trained_index_file = os.path.join(os.environ['TEST_WORKSPACE'], 'faiss.index')
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
},
)
with f:
# the trained index will be dumped to "faiss.index"
f.post(on='/train', parameters={'train_data_file': train_data_file})
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
'dump_path': dump_path,
},
)
with f:
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result, bbox_mapping_back
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def merge_aug_results(self, aug_results, img_metas):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
Returns:
tuple: (bboxes, labels)
"""
recovered_bboxes, aug_labels = [], []
for bboxes_labels, img_info in zip(aug_results, img_metas):
img_shape = img_info[0]['img_shape'] # using shape before padding
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes, labels = bboxes_labels
bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
aug_labels.append(labels)
bboxes = torch.cat(recovered_bboxes, dim=0)
labels = torch.cat(aug_labels)
if bboxes.shape[0] > 0:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=False):
"""Augment testing of CornerNet.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
Note:
``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, [img_metas[ind], img_metas[flip_ind]], False, False)
aug_results.append(bbox_list[0])
aug_results.append(bbox_list[1])
bboxes, labels = self.merge_aug_results(aug_results, img_metas)
bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
return [bbox_results]
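# Hedged illustration of the input contract aug_test asserts: images arrive as
# (original, flip) pairs, so even indices are originals and odd indices their
# horizontal flips; tensor sizes and meta fields below are illustrative only.
import torch
img = torch.zeros(1, 3, 511, 511)  # illustrative CornerNet input size
imgs = [img, torch.flip(img, dims=[-1])]  # (original, flipped) pair
img_metas = [
    [dict(img_shape=(511, 511, 3), scale_factor=1.0, flip=False)],
    [dict(img_shape=(511, 511, 3), scale_factor=1.0, flip=True)],
]
# detector.aug_test(imgs, img_metas)  # detector construction omitted here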
|
import torch
from mmdet.core import bbox2result, bbox_mapping_back
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def merge_aug_results(self, aug_results, img_metas):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
Returns:
tuple: (bboxes, labels)
"""
recovered_bboxes, aug_labels = [], []
for bboxes_labels, img_info in zip(aug_results, img_metas):
img_shape = img_info[0]['img_shape'] # using shape before padding
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes, labels = bboxes_labels
bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
aug_labels.append(labels)
bboxes = torch.cat(recovered_bboxes, dim=0)
labels = torch.cat(aug_labels)
if bboxes.shape[0] > 0:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=False):
"""Augment testing of CornerNet.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
Note:
``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, [img_metas[ind], img_metas[flip_ind]], False, False)
aug_results.append(bbox_list[0])
aug_results.append(bbox_list[1])
bboxes, labels = self.merge_aug_results(aug_results, img_metas)
bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
return [bbox_results]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.mnist import load_data as load_data
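# One-line usage sketch of the re-exported loader (downloads MNIST on first call).
(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape)  # (60000, 28, 28)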
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.mnist import load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.california_housing import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.california_housing import load_data
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=SpacyTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
def test_spacy_text_encoder():
docs = DocumentArray(
[
Document(text='Han likes eating pizza'),
Document(text='Han likes pizza'),
Document(text='Jina rocks'),
]
)
f = Flow().add(uses=SpacyTextEncoder)
with f:
resp = f.post(on='/test', inputs=docs, return_results=True)
docs = resp[0].docs
assert len(docs) == 3
for doc in docs:
assert doc.embedding.shape == (96,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=SpacyTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
import numpy as np
import pytest
import torch
from docarray import BaseDoc
from docarray.base_doc import AnyDoc
from docarray.typing import (
AnyEmbedding,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
@pytest.mark.proto
def test_proto_all_types():
class Mymmdoc(BaseDoc):
tensor: NdArray
torch_tensor: TorchTensor
embedding: AnyEmbedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDoc.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
@pytest.mark.tensorflow
def test_proto_all_types_proto3():
import tensorflow as tf
from docarray.typing import TensorFlowTensor
class Mymmdoc(BaseDoc):
tensor: NdArray
torch_tensor: TorchTensor
tf_tensor: TensorFlowTensor
embedding: AnyEmbedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
tf_tensor=tf.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai/file.txt',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDoc.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
|
import numpy as np
import pytest
import torch
from docarray import BaseDoc
from docarray.base_doc import AnyDoc
from docarray.typing import (
AnyEmbedding,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
@pytest.mark.proto
def test_proto_all_types():
class Mymmdoc(BaseDoc):
tensor: NdArray
torch_tensor: TorchTensor
embedding: AnyEmbedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDoc.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
@pytest.mark.tensorflow
def test_proto_all_types_proto3():
import tensorflow as tf
from docarray.typing import TensorFlowTensor
class Mymmdoc(BaseDoc):
tensor: NdArray
torch_tensor: TorchTensor
tf_tensor: TensorFlowTensor
embedding: AnyEmbedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
tf_tensor=tf.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDoc.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Query Sparsity: Active Dimensions: 63.0, Sparsity Ratio: 0.9979
Model Corpus Sparsity: Active Dimensions: 63.4, Sparsity Ratio: 0.9979
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 48.1, Sparsity Ratio: 0.9984
Model Corpus Sparsity: Active Dimensions: 125.4, Sparsity Ratio: 0.9959
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Query Sparsity: Active Dimensions: 55.5, Sparsity Ratio: 0.9982
Model Corpus Sparsity: Active Dimensions: 94.4, Sparsity Ratio: 0.9969
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Sparsity Stats Query : Row Non-Zero Mean: 62.97999954223633, Row Sparsity Mean: 0.9979365468025208
Model Sparsity Stats Corpus : Row Non-Zero Mean: 63.39932632446289, Row Sparsity Mean: 0.9979228377342224
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Sparsity Stats Query : Row Non-Zero Mean: 48.08000183105469, Row Sparsity Mean: 0.9984247088432312
Model Sparsity Stats Corpus : Row Non-Zero Mean: 125.3604965209961, Row Sparsity Mean: 0.9958928227424622
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Sparsity Stats Query : Row Non-Zero Mean: 55.53000068664551, Row Sparsity Mean: 0.998180627822876
Model Sparsity Stats Corpus : Row Non-Zero Mean: 94.37991142272949, Row Sparsity Mean: 0.9969078302383423
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|