| input (string, lengths 33–5k) | output (string, lengths 32–5k) |
| --- | --- |
import argparse
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import TarFile
from zipfile import ZipFile
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Download datasets for training')
parser.add_argument(
'--dataset-name', type=str, help='dataset name', default='coco2017')
parser.add_argument(
'--save-dir',
type=str,
help='the dir to save dataset',
default='data/coco')
parser.add_argument(
'--unzip',
action='store_true',
help='whether to unzip the dataset; downloaded archives are kept unless --delete is given')
parser.add_argument(
'--delete',
action='store_true',
help='delete the downloaded zipped files')
parser.add_argument(
'--threads', type=int, help='number of threads', default=4)
args = parser.parse_args()
return args
def download(url, dir, unzip=True, delete=False, threads=1):
def download_one(url, dir):
f = dir / Path(url).name
if Path(url).is_file():
Path(url).rename(f)
elif not f.exists():
print('Downloading {} to {}'.format(url, f))
torch.hub.download_url_to_file(url, f, progress=True)
if unzip and f.suffix in ('.zip', '.tar'):
print('Unzipping {}'.format(f.name))
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir)
elif f.suffix == '.tar':
TarFile(f).extractall(path=dir)
if delete:
f.unlink()
print('Deleted {}'.format(f))
dir = Path(dir)
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def main():
args = parse_args()
path = Path(args.save_dir)
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
data2url = dict(
# TODO: Support for downloading Panoptic Segmentation of COCO
coco2017=[
'http://images.cocodataset.org/zips/train2017.zip',
'http://images.cocodataset.org/zips/val2017.zip',
'http://images.cocodataset.org/zips/test2017.zip',
'http://images.cocodataset.org/zips/unlabeled2017.zip',
'http://images.cocodataset.org/annotations/annotations_trainval2017.zip', # noqa
'http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip', # noqa
'http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip', # noqa
'http://images.cocodataset.org/annotations/image_info_test2017.zip', # noqa
'http://images.cocodataset.org/annotations/image_info_unlabeled2017.zip', # noqa
],
lvis=[
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
],
voc2007=[
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa
],
)
url = data2url.get(args.dataset_name, None)
if url is None:
print('Only COCO, VOC, and LVIS are supported for now!')
return
download(
url,
dir=path,
unzip=args.unzip,
delete=args.delete,
threads=args.threads)
if __name__ == '__main__':
main()
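# Example invocation (a sketch; the script filename below is assumed):
#   python download_dataset.py --dataset-name coco2017 --save-dir data/coco \
#       --unzip --delete --threads 4
# This downloads the COCO 2017 archives into data/coco, extracts each .zip/.tar
# archive, and removes the archives after extraction.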
|
import argparse
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import TarFile
from zipfile import ZipFile
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Download datasets for training')
parser.add_argument(
'--dataset-name', type=str, help='dataset name', default='coco2017')
parser.add_argument(
'--save-dir',
type=str,
help='the dir to save dataset',
default='data/coco')
parser.add_argument(
'--unzip',
action='store_true',
help='whether to unzip the dataset; downloaded archives are kept unless --delete is given')
parser.add_argument(
'--delete',
action='store_true',
help='delete the downloaded zipped files')
parser.add_argument(
'--threads', type=int, help='number of threads', default=4)
args = parser.parse_args()
return args
def download(url, dir, unzip=True, delete=False, threads=1):
def download_one(url, dir):
f = dir / Path(url).name
if Path(url).is_file():
Path(url).rename(f)
elif not f.exists():
print('Downloading {} to {}'.format(url, f))
torch.hub.download_url_to_file(url, f, progress=True)
if unzip and f.suffix in ('.zip', '.tar'):
print('Unzipping {}'.format(f.name))
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir)
elif f.suffix == '.tar':
TarFile(f).extractall(path=dir)
if delete:
f.unlink()
print('Deleted {}'.format(f))
dir = Path(dir)
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def main():
args = parse_args()
path = Path(args.save_dir)
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
data2url = dict(
# TODO: Support for downloading Panoptic Segmentation of COCO
coco2017=[
'http://images.cocodataset.org/zips/train2017.zip',
'http://images.cocodataset.org/zips/val2017.zip',
'http://images.cocodataset.org/zips/test2017.zip',
'http://images.cocodataset.org/annotations/' +
'annotations_trainval2017.zip'
],
lvis=[
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
],
voc2007=[
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa
],
)
url = data2url.get(args.dataset_name, None)
if url is None:
print('Only COCO, VOC, and LVIS are supported for now!')
return
download(
url,
dir=path,
unzip=args.unzip,
delete=args.delete,
threads=args.threads)
if __name__ == '__main__':
main()
|
"""langchain-core version information and utilities."""
VERSION = "0.3.57"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.56"
|
from typing import Any
from llama_index.core.bridge.pydantic import model_serializer
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event, StartEvent
class AgentInput(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
class AgentSetup(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
class AgentStream(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection]
raw: Any
class AgentOutput(Event):
"""LLM output."""
response: ChatMessage
tool_calls: list[ToolSelection]
raw: Any
current_agent_name: str
def __str__(self) -> str:
return self.response.content or ""
class ToolCall(Event):
"""All tool calls are surfaced."""
tool_name: str
tool_kwargs: dict
tool_id: str
class ToolCallResult(Event):
"""Tool call result."""
tool_name: str
tool_kwargs: dict
tool_id: str
tool_output: ToolOutput
return_direct: bool
class AgentWorkflowStartEvent(StartEvent):
@model_serializer()
def serialize_start_event(self) -> dict:
"""Serialize the start event and exclude the memory."""
return {
"user_msg": self.user_msg,
"chat_history": self.chat_history,
}
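# A minimal sketch of the serializer above (assuming the start event carries
# `user_msg` and `chat_history` attributes, as referenced in the method body):
# with pydantic's @model_serializer hook, `model_dump()` returns only these two
# fields, so any memory object attached to the start event is excluded.
#
#   event = AgentWorkflowStartEvent(user_msg="Hello", chat_history=[])
#   assert event.model_dump() == {"user_msg": "Hello", "chat_history": []}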
|
from typing import Any
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event
class AgentInput(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
class AgentSetup(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
class AgentStream(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection]
raw: Any
class AgentOutput(Event):
"""LLM output."""
response: ChatMessage
tool_calls: list[ToolSelection]
raw: Any
current_agent_name: str
def __str__(self) -> str:
return self.response.content or ""
class ToolCall(Event):
"""All tool calls are surfaced."""
tool_name: str
tool_kwargs: dict
tool_id: str
class ToolCallResult(ToolCall):
"""Tool call result."""
tool_output: ToolOutput
return_direct: bool
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core.bbox import distance2bbox
from mmdet.core.mask.structures import BitmapMasks, PolygonMasks
from mmdet.core.utils import center_of_mass, mask2ndarray
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
Returns:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def test_mask2ndarray():
raw_masks = np.ones((3, 28, 28))
bitmap_mask = BitmapMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(bitmap_mask)
assert np.allclose(raw_masks, output_mask)
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(polygon_masks)
assert output_mask.shape == (3, 28, 28)
raw_masks = np.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = torch.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
# test unsupported type
raw_masks = []
with pytest.raises(TypeError):
output_mask = mask2ndarray(raw_masks)
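# distance2bbox decodes per-point (left, top, right, bottom) distances into boxes:
# x1 = x - left, y1 = y - top, x2 = x + right, y2 = y + bottom, with x-coordinates
# clamped to [0, W] and y-coordinates to [0, H] when max_shape=(H, W) is given.
# For example, point (74, 61) with distance (0, 0, 1, 1) decodes to (74, 61, 75, 62),
# and point (-29, 106) with distance (1, 2, 10, 6) decodes to (-30, 104, -19, 112),
# which is clamped to (0, 104, 0, 112) under max_shape=(120, 100).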
def test_distance2bbox():
point = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
expected_decode_bboxes = torch.Tensor([[74., 61., 75., 62.],
[0., 104., 0., 112.],
[100., 90., 100., 120.],
[0., 120., 100., 120.]])
out_bbox = distance2bbox(point, distance, max_shape=(120, 100))
assert expected_decode_bboxes.allclose(out_bbox)
out = distance2bbox(point, distance, max_shape=torch.Tensor((120, 100)))
assert expected_decode_bboxes.allclose(out)
batch_point = point.unsqueeze(0).repeat(2, 1, 1)
batch_distance = distance.unsqueeze(0).repeat(2, 1, 1)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=[(120, 100), (120, 100)])[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
distance2bbox(
batch_point,
batch_distance,
max_shape=[(120, 100), (120, 100), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
rois = torch.zeros((2, 0, 4))
deltas = torch.zeros((2, 0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
@pytest.mark.parametrize('mask', [
torch.ones((28, 28)),
torch.zeros((28, 28)),
torch.rand(28, 28) > 0.5,
torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]])
])
def test_center_of_mass(mask):
center_h, center_w = center_of_mass(mask)
if mask.shape[0] == 4:
assert center_h == 1.5
assert center_w == 1.5
assert isinstance(center_h, torch.Tensor) \
and isinstance(center_w, torch.Tensor)
assert 0 <= center_h <= 28 \
and 0 <= center_w <= 28
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core.bbox import distance2bbox
from mmdet.core.mask.structures import BitmapMasks, PolygonMasks
from mmdet.core.utils import center_of_mass, mask2ndarray
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
Returns:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def test_mask2ndarray():
raw_masks = np.ones((3, 28, 28))
bitmap_mask = BitmapMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(bitmap_mask)
assert np.allclose(raw_masks, output_mask)
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(polygon_masks)
assert output_mask.shape == (3, 28, 28)
raw_masks = np.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = torch.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
# test unsupported type
raw_masks = []
with pytest.raises(TypeError):
output_mask = mask2ndarray(raw_masks)
def test_distance2bbox():
point = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
expected_decode_bboxes = torch.Tensor([[74., 61., 75., 62.],
[0., 104., 0., 112.],
[100., 90., 100., 120.],
[0., 120., 100., 120.]])
out_bbox = distance2bbox(point, distance, max_shape=(120, 100))
assert expected_decode_bboxes.allclose(out_bbox)
out = distance2bbox(point, distance, max_shape=torch.Tensor((120, 100)))
assert expected_decode_bboxes.allclose(out)
batch_point = point.unsqueeze(0).repeat(2, 1, 1)
batch_distance = distance.unsqueeze(0).repeat(2, 1, 1)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=[(120, 100), (120, 100)])[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
distance2bbox(
batch_point,
batch_distance,
max_shape=[(120, 100), (120, 100), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
rois = torch.zeros((2, 0, 4))
deltas = torch.zeros((2, 0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
@pytest.mark.parametrize('mask', [
torch.ones((28, 28)),
torch.zeros((28, 28)),
torch.rand(28, 28) > 0.5,
torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]])
])
def test_center_of_mass(mask):
center_h, center_w = center_of_mass(mask)
if mask.shape[0] == 4:
assert center_h == 1.5
assert center_w == 1.5
assert isinstance(center_h, torch.Tensor) \
and isinstance(center_w, torch.Tensor)
assert 0 <= center_h <= 28 \
and 0 <= center_w <= 28
|
_base_ = './cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' # noqa
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optim_wrapper = dict(paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
|
_base_ = './cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' # noqa
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optim_wrapper = dict(paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
|
"""Standard LangChain interface tests"""
import os
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
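# Running these integration tests assumes the following environment variables are
# set (variable names taken from the code above; values are placeholders):
#   export AZURE_OPENAI_API_VERSION="..."
#   export AZURE_OPENAI_API_BASE="https://<resource>.openai.azure.com/"
#   export AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..."
#   export AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME="..."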
|
"""Standard LangChain interface tests"""
import os
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Dict, List, Optional, Union
import torch
from mmengine.optim.optimizer._deepspeed import DeepSpeedOptimWrapper
from mmengine.registry import MODEL_WRAPPERS
try:
from deepspeed.runtime.engine import DeepSpeedEngine
except ImportError:
DeepSpeedEngine = None
@MODEL_WRAPPERS.register_module()
class MMDeepSpeedEngineWrapper:
def __init__(
self,
*,
model: DeepSpeedEngine,
inputs_to_half: Optional[List[Union[int, str]]] = None,
):
self.model = model
self._inputs_to_half = inputs_to_half
def __getattr__(self, name):
return getattr(self.model, name)
def train_step(
self,
data: Union[dict, tuple, list],
optim_wrapper: DeepSpeedOptimWrapper,
) -> Dict[str, torch.Tensor]:
data = self.model.module.data_preprocessor(data, training=True)
data = self._cast_inputs_half(data)
losses = self._run_forward(data, mode='loss')
parsed_loss, log_vars = self.model.module.parse_losses(losses)
optim_wrapper.update_params(parsed_loss)
return log_vars
def val_step(self, data: Union[dict, tuple, list]) -> list:
"""Gets the prediction of module during validation process.
Args:
data (dict or tuple or list): Data sampled from dataset.
Returns:
list: The predictions of given data.
"""
data = self.model.module.data_preprocessor(data, False)
data = self._cast_inputs_half(data)
return self._run_forward(data, mode='predict')
def test_step(self, data: Union[dict, tuple, list]) -> list:
"""Gets the predictions of module during testing process.
Args:
data (dict or tuple or list): Data sampled from dataset.
Returns:
list: The predictions of given data.
"""
data = self.model.module.data_preprocessor(data, False)
data = self._cast_inputs_half(data)
return self._run_forward(data, mode='predict')
def _run_forward(self, data: Union[dict, tuple, list], mode: str) -> Any:
"""Unpacks data for :meth:`forward`
Args:
data (dict or tuple or list): Data sampled from dataset.
mode (str): Mode of forward.
Returns:
dict or list: Results of training or testing mode.
"""
if isinstance(data, dict):
results = self.model(**data, mode=mode)
elif isinstance(data, (list, tuple)):
results = self.model(*data, mode=mode)
else:
raise TypeError('Output of `data_preprocessor` should be '
f'list, tuple or dict, but got {type(data)}')
return results
def _cast_inputs_half(self, inputs: Union[list, tuple, dict, None]):
"""Cast inputs to half precision if needed.
Args:
inputs (list or tuple or dict or None): Inputs to be cast.
Returns:
list or tuple or dict or None: The cast inputs.
"""
if self._inputs_to_half is None:
return inputs
if isinstance(inputs, (list, tuple)):
new_inputs = []
for i, v in enumerate(inputs):
if i in self._inputs_to_half:
new_inputs.append(v.half())
else:
new_inputs.append(v)
return inputs.__class__(new_inputs)
elif isinstance(inputs, dict):
for k, v in inputs.items():
if k in self._inputs_to_half:
inputs[k] = v.half()
return inputs
else:
raise TypeError('inputs should be list, tuple or dict, '
f'but got {type(inputs)}')
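# A small illustration of `inputs_to_half` (the dict key below is hypothetical):
# with inputs_to_half=[0], only the first positional element of a list/tuple input
# is cast to fp16; with inputs_to_half=['inputs'], only the dict entry under the
# key 'inputs' is cast; everything else keeps its original dtype.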
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Dict, List, Optional, Union
import torch
from deepspeed.runtime.engine import DeepSpeedEngine
from mmengine.optim.optimizer._deepspeed import DeepSpeedOptimWrapper
from mmengine.registry import MODEL_WRAPPERS
@MODEL_WRAPPERS.register_module()
class MMDeepSpeedEngineWrapper:
def __init__(
self,
*,
model: DeepSpeedEngine,
inputs_to_half: Optional[List[Union[int, str]]] = None,
):
self.model = model
self._inputs_to_half = inputs_to_half
def __getattr__(self, name):
return getattr(self.model, name)
def train_step(
self,
data: Union[dict, tuple, list],
optim_wrapper: DeepSpeedOptimWrapper,
) -> Dict[str, torch.Tensor]:
data = self.model.module.data_preprocessor(data, training=True)
data = self._cast_inputs_half(data)
losses = self._run_forward(data, mode='loss')
parsed_loss, log_vars = self.model.module.parse_losses(losses)
optim_wrapper.update_params(parsed_loss)
return log_vars
def val_step(self, data: Union[dict, tuple, list]) -> list:
"""Gets the prediction of module during validation process.
Args:
data (dict or tuple or list): Data sampled from dataset.
Returns:
list: The predictions of given data.
"""
data = self.model.module.data_preprocessor(data, False)
data = self._cast_inputs_half(data)
return self._run_forward(data, mode='predict')
def test_step(self, data: Union[dict, tuple, list]) -> list:
"""Gets the predictions of module during testing process.
Args:
data (dict or tuple or list): Data sampled from dataset.
Returns:
list: The predictions of given data.
"""
data = self.model.module.data_preprocessor(data, False)
data = self._cast_inputs_half(data)
return self._run_forward(data, mode='predict')
def _run_forward(self, data: Union[dict, tuple, list], mode: str) -> Any:
"""Unpacks data for :meth:`forward`
Args:
data (dict or tuple or list): Data sampled from dataset.
mode (str): Mode of forward.
Returns:
dict or list: Results of training or testing mode.
"""
if isinstance(data, dict):
results = self.model(**data, mode=mode)
elif isinstance(data, (list, tuple)):
results = self.model(*data, mode=mode)
else:
raise TypeError('Output of `data_preprocessor` should be '
f'list, tuple or dict, but got {type(data)}')
return results
def _cast_inputs_half(self, inputs: Union[list, tuple, dict, None]):
"""Cast inputs to half precision if needed.
Args:
inputs (list or tuple or dict or None): Inputs to be cast.
Returns:
list or tuple or dict or None: The cast inputs.
"""
if self._inputs_to_half is None:
return inputs
if isinstance(inputs, (list, tuple)):
new_inputs = []
for i, v in enumerate(inputs):
if i in self._inputs_to_half:
new_inputs.append(v.half())
else:
new_inputs.append(v)
return inputs.__class__(new_inputs)
elif isinstance(inputs, dict):
for k, v in inputs.items():
if k in self._inputs_to_half:
inputs[k] = v.half()
return inputs
else:
raise TypeError('inputs should be list, tuple or dict, '
f'but got {type(inputs)}')
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As dataset, we use sentence-transformers/msmarco-bm25, which contains triplet versions of MS MARCO mined with BM25.
As the loss function, we use MultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
logging.info("Model max length: %s", model.max_seq_length)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/sentence-transformers/msmarco-bm25
logging.info("Read the MS MARCO training dataset")
full_dataset = load_dataset("sentence-transformers/quora-duplicates", "triplet", split="train").select(
range(100000)
)
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As dataset, we use sentence-transformers/msmarco-bm25, which contains triplet versions of MS MARCO mined with BM25.
As the loss function, we use MultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
print("Model max length:", model.max_seq_length)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/sentence-transformers/msmarco-bm25
logging.info("Read the MS MARCO training dataset")
full_dataset = load_dataset("sentence-transformers/quora-duplicates", "triplet", split="train").select(
range(100000)
)
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
name = _uppercase_uppercase_re.sub(r"\1_\2", name)
name = _lowercase_uppercase_re.sub(r"\1_\2", name)
return name.lower()
def snakecase_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
name = _single_underscore_re.split(name)
name = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
filepath = os.path.join(data_dir, prefix)
return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
prefix = filename_prefix_for_split(dataset_name, split)
prefix = os.path.join(path, prefix)
if shard_lengths:
num_shards = len(shard_lengths)
filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
if filetype_suffix:
filenames = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
filename = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
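# Example (illustrative paths, POSIX-style joins):
#   filenames_for_dataset_split("/data", "MyDataset", "train",
#                               filetype_suffix="arrow", shard_lengths=[100, 100])
#   -> ['/data/my_dataset-train-00000-of-00002.arrow',
#       '/data/my_dataset-train-00001-of-00002.arrow']
#   filepattern_for_dataset_split("MyDataset", "train", "/data", filetype_suffix="arrow")
#   -> '/data/my_dataset-train.arrow*'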
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
name = _uppercase_uppercase_re.sub(r"\1_\2", name)
name = _lowercase_uppercase_re.sub(r"\1_\2", name)
return name.lower()
def snakecase_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
name = _single_underscore_re.split(name)
name = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
filepath = os.path.join(data_dir, prefix)
return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
prefix = filename_prefix_for_split(dataset_name, split)
prefix = os.path.join(path, prefix)
if shard_lengths:
num_shards = len(shard_lengths)
filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
if filetype_suffix:
filenames = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
filename = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
|
import numpy as np
import torch
from docarray import Document, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
Tensor,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
class MySUperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class MyDoc(Document):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: Tensor
generic_torch_tensor: Tensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
import torch
from docarray import Document, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
NdArray,
Tensor,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
class MySUperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class MyDoc(Document):
img_url: ImageUrl
txt_url: TextUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: Tensor
generic_torch_tensor: Tensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
#!/usr/bin/env python3
"""Convert the fairseq models available in voxpopuli repo https://github.com/facebookresearch/voxpopuli
The available checkpoints should open with fairseq.
But the following error cannot be resolved with almost any version of fairseq.
https://github.com/facebookresearch/voxpopuli/issues/29
So this script manually parse the checkpoint file and reconstruct the model.
Examples
```
python convert_voxpopuli_models.py \
--input-file wav2vec2_base_10k_ft_fr.pt \
--output-file wav2vec2_voxpopuli_base_10k_asr_fr.pt
```
"""
def _parse_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument("--input-file", required=True, help="Input checkpoint file.")
parser.add_argument("--output-file", required=False, help="Output model file.")
return parser.parse_args()
def _removeprefix(s, prefix):
if s.startswith(prefix):
return s[len(prefix) :]
return s
def _load(input_file):
import torch
from omegaconf import OmegaConf
data = torch.load(input_file)
cfg = OmegaConf.to_container(data["cfg"])
for key in list(cfg.keys()):
if key != "model":
del cfg[key]
if "w2v_args" in cfg["model"]:
del cfg["model"]["w2v_args"][key]
state_dict = {_removeprefix(k, "w2v_encoder."): v for k, v in data["model"].items()}
return cfg, state_dict
def _parse_model_param(cfg, state_dict):
key_mapping = {
"extractor_mode": "extractor_mode",
"conv_feature_layers": "extractor_conv_layer_config",
"conv_bias": "extractor_conv_bias",
"encoder_embed_dim": "encoder_embed_dim",
"dropout_input": "encoder_projection_dropout",
"conv_pos": "encoder_pos_conv_kernel",
"conv_pos_groups": "encoder_pos_conv_groups",
"encoder_layers": "encoder_num_layers",
"encoder_attention_heads": "encoder_num_heads",
"attention_dropout": "encoder_attention_dropout",
"encoder_ffn_embed_dim": "encoder_ff_interm_features",
"activation_dropout": "encoder_ff_interm_dropout",
"dropout": "encoder_dropout",
"layer_norm_first": "encoder_layer_norm_first",
"layerdrop": "encoder_layer_drop",
"encoder_layerdrop": "encoder_layer_drop",
}
params = {}
src_dicts = [cfg["model"]]
if "w2v_args" in cfg["model"]:
src_dicts.append(cfg["model"]["w2v_args"]["model"])
for src, tgt in key_mapping.items():
for model_cfg in src_dicts:
if src in model_cfg:
params[tgt] = model_cfg[src]
break
if params["extractor_mode"] == "default":
params["extractor_mode"] = "group_norm"
# the following line is commented out to resolve lint warning; uncomment before running script
# params["extractor_conv_layer_config"] = eval(params["extractor_conv_layer_config"])
assert len(params) == 15
params["aux_num_out"] = state_dict["proj.bias"].numel() if "proj.bias" in state_dict else None
return params
def _main(args):
import json
import torch
import torchaudio
from torchaudio.models.wav2vec2.utils.import_fairseq import (
_convert_state_dict as _convert,
)
cfg, state_dict = _load(args.input_file)
params = _parse_model_param(cfg, state_dict)
print(json.dumps(params, indent=4))
model = torchaudio.models.wav2vec2_model(**params)
model.load_state_dict(_convert(state_dict))
torch.save(model.state_dict(), args.output_file)
if __name__ == "__main__":
_main(_parse_args())
|
#!/usr/bin/env python3
"""Convert the fairseq models available in voxpopuli repo https://github.com/facebookresearch/voxpopuli
The available checkpoints should open with fairseq.
But the following error cannot be resolved with almost any version of fairseq.
https://github.com/facebookresearch/voxpopuli/issues/29
So this script manually parse the checkpoint file and reconstruct the model.
Examples
```
python convert_voxpopuli_models.py \
--input-file wav2vec2_base_10k_ft_fr.pt \
--output-file wav2vec2_voxpopuli_base_10k_asr_fr.pt
```
"""
def _parse_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument("--input-file", required=True, help="Input checkpoint file.")
parser.add_argument("--output-file", required=False, help="Output model file.")
return parser.parse_args()
def _removeprefix(s, prefix):
if s.startswith(prefix):
return s[len(prefix) :]
return s
def _load(input_file):
import torch
from omegaconf import OmegaConf
data = torch.load(input_file)
cfg = OmegaConf.to_container(data["cfg"])
for key in list(cfg.keys()):
if key != "model":
del cfg[key]
if "w2v_args" in cfg["model"]:
del cfg["model"]["w2v_args"][key]
state_dict = {_removeprefix(k, "w2v_encoder."): v for k, v in data["model"].items()}
return cfg, state_dict
def _parse_model_param(cfg, state_dict):
key_mapping = {
"extractor_mode": "extractor_mode",
"conv_feature_layers": "extractor_conv_layer_config",
"conv_bias": "extractor_conv_bias",
"encoder_embed_dim": "encoder_embed_dim",
"dropout_input": "encoder_projection_dropout",
"conv_pos": "encoder_pos_conv_kernel",
"conv_pos_groups": "encoder_pos_conv_groups",
"encoder_layers": "encoder_num_layers",
"encoder_attention_heads": "encoder_num_heads",
"attention_dropout": "encoder_attention_dropout",
"encoder_ffn_embed_dim": "encoder_ff_interm_features",
"activation_dropout": "encoder_ff_interm_dropout",
"dropout": "encoder_dropout",
"layer_norm_first": "encoder_layer_norm_first",
"layerdrop": "encoder_layer_drop",
"encoder_layerdrop": "encoder_layer_drop",
}
params = {}
src_dicts = [cfg["model"]]
if "w2v_args" in cfg["model"]:
src_dicts.append(cfg["model"]["w2v_args"]["model"])
for src, tgt in key_mapping.items():
for model_cfg in src_dicts:
if src in model_cfg:
params[tgt] = model_cfg[src]
break
if params["extractor_mode"] == "default":
params["extractor_mode"] = "group_norm"
# the following line is commented out to resolve lint warning; uncomment before running script
# params["extractor_conv_layer_config"] = eval(params["extractor_conv_layer_config"])
assert len(params) == 15
params["aux_num_out"] = state_dict["proj.bias"].numel() if "proj.bias" in state_dict else None
return params
def _main(args):
import json
import torch
import torchaudio
from torchaudio.models.wav2vec2.utils.import_fairseq import _convert_state_dict as _convert
cfg, state_dict = _load(args.input_file)
params = _parse_model_param(cfg, state_dict)
print(json.dumps(params, indent=4))
model = torchaudio.models.wav2vec2_model(**params)
model.load_state_dict(_convert(state_dict))
torch.save(model.state_dict(), args.output_file)
if __name__ == "__main__":
_main(_parse_args())
|
from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 3
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed[0], top_k=top_k)
decoded_document = model.decode(document_embed[0])
for i in range(top_k):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# ------------------------------------------example with v2 distill-----------------------------------------
Similarity: tensor([[17.5307]], device='cuda:0')
Top tokens 3 for each text:
Token: ny, Query score: 5.7729, Document score: 1.4109
Token: weather, Query score: 4.5684, Document score: 1.4673
Token: now, Query score: 3.5895, Document score: 0.7473
"""
print("# -----------------------------------------example with v3 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling(pooling_strategy="max", activation_function="log1p_relu"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 10
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed[0], top_k=top_k)
decoded_document = model.decode(document_embed[0])
for i in range(min(top_k, len(decoded_query))):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# -----------------------------------------example with v3 distill-----------------------------------------
Similarity: tensor([[11.1105]], device='cuda:0')
Top tokens 10 for each text:
Token: ny, Query score: 5.7729, Document score: 0.8049
Token: weather, Query score: 4.5684, Document score: 0.9710
Token: now, Query score: 3.5895, Document score: 0.4720
Token: ?, Query score: 3.3313, Document score: 0.0286
Token: what, Query score: 2.7699, Document score: 0.0787
Token: in, Query score: 0.4989, Document score: 0.0417
"""
|
from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 3
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed)[0]
decoded_document = model.decode(document_embed[0], top_k=100)
for i in range(top_k):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# ------------------------------------------example with v2 distill-----------------------------------------
Similarity: tensor([[17.5307]], device='cuda:0')
Top tokens 3 for each text:
Token: ny, Query score: 5.7729, Document score: 1.4109
Token: weather, Query score: 4.5684, Document score: 1.4673
Token: now, Query score: 3.5895, Document score: 0.7473
"""
print("# -----------------------------------------example with v3 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling(pooling_strategy="max", activation_function="log1p_relu"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 10
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed)[0]
decoded_document = model.decode(document_embed[0], top_k=100)
for i in range(top_k):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# -----------------------------------------example with v3 distill-----------------------------------------
Similarity: tensor([[11.1105]], device='cuda:0')
Top tokens 10 for each text:
Token: ny, Query score: 5.7729, Document score: 0.8049
Token: weather, Query score: 4.5684, Document score: 0.9710
Token: now, Query score: 3.5895, Document score: 0.4720
Token: ?, Query score: 3.3313, Document score: 0.0286
Token: what, Query score: 2.7699, Document score: 0.0787
Token: in, Query score: 0.4989, Document score: 0.0417
"""
|
import jwt # noqa
import pytest
from llama_index.core import Document
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilter,
MetadataFilters,
FilterCondition,
FilterOperator,
)
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
def test_class():
names_of_base_classes = [b.__name__ for b in DeepLakeVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
@pytest.fixture()
def vs_ids():
vs = DeepLakeVectorStore(dataset_path="mem://test", overwrite=True)
ids = vs.add(
nodes=[
Document(text="Doc 1", embedding=[1, 2, 1], metadata={"a": "1", "b": 10}),
Document(text="Doc 2", embedding=[1, 2, 2], metadata={"a": "2", "b": 11}),
Document(text="Doc 3", embedding=[1, 2, 3], metadata={"a": "3", "b": 12}),
]
)
yield (vs, ids)
vs.clear()
def test_filters(vs_ids):
vs, ids = vs_ids
nodes = vs.get_nodes(node_ids=[ids[0], ids[2]])
assert [x.text for x in nodes] == ["Doc 1", "Doc 3"]
nodes = vs.get_nodes(node_ids=["a"])
assert len(nodes) == 0
nodes = vs.get_nodes(
filters=MetadataFilters(filters=[MetadataFilter(key="a", value="2")])
)
assert [x.text for x in nodes] == ["Doc 2"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
]
)
)
assert [x.text for x in nodes] == []
nodes = vs.get_nodes(
filters=MetadataFilters(
condition=FilterCondition.OR,
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
],
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
condition=FilterCondition.OR,
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
],
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=10, operator=FilterOperator.GT),
]
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=11, operator=FilterOperator.LTE),
]
)
)
assert [x.text for x in nodes] == ["Doc 1", "Doc 2"]
def test_delete_id(vs_ids):
vs, ids = vs_ids
vs.delete_nodes(node_ids=[ids[0], ids[2]])
assert [x.text for x in vs.get_nodes()] == ["Doc 2"]
def test_delete_filter(vs_ids):
vs, ids = vs_ids
vs.delete_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=10, operator=FilterOperator.GT),
]
)
)
assert [x.text for x in vs.get_nodes()] == ["Doc 1"]
|
import jwt # noqa
from llama_index.core import Document
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilter,
MetadataFilters,
FilterCondition,
FilterOperator,
)
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
def test_class():
names_of_base_classes = [b.__name__ for b in DeepLakeVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
def test_e2e():
vs = DeepLakeVectorStore(dataset_path="mem://test", overwrite=True)
ids = vs.add(
nodes=[
Document(text="Doc 1", embedding=[1, 2, 1], metadata={"a": "1", "b": 10}),
Document(text="Doc 2", embedding=[1, 2, 2], metadata={"a": "2", "b": 11}),
Document(text="Doc 3", embedding=[1, 2, 3], metadata={"a": "3", "b": 12}),
]
)
nodes = vs.get_nodes(node_ids=[ids[0], ids[2]])
assert [x.text for x in nodes] == ["Doc 1", "Doc 3"]
nodes = vs.get_nodes(node_ids=["a"])
assert len(nodes) == 0
assert [
x.text
for x in vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="a", value="2"),
]
)
)
] == ["Doc 2"]
assert [
x.text
for x in vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
]
)
)
] == []
assert [
x.text
for x in vs.get_nodes(
filters=MetadataFilters(
condition=FilterCondition.OR,
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
],
)
)
] == ["Doc 2", "Doc 3"]
assert [
x.text
for x in vs.get_nodes(
filters=MetadataFilters(
condition=FilterCondition.OR,
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
],
)
)
] == ["Doc 2", "Doc 3"]
assert [
x.text
for x in vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=10, operator=FilterOperator.GT),
]
)
)
] == ["Doc 2", "Doc 3"]
assert [
x.text
for x in vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=11, operator=FilterOperator.LTE),
]
)
)
] == ["Doc 1", "Doc 2"]
vs.delete_nodes(node_ids=[ids[0], ids[2]])
assert [x.text for x in vs.get_nodes()] == ["Doc 2"]
vs.add(
nodes=[
Document(text="Doc 4", embedding=[1, 2, 1], metadata={"a": "4", "b": 14}),
Document(text="Doc 5", embedding=[1, 2, 2], metadata={"a": "5", "b": 15}),
Document(text="Doc 6", embedding=[1, 2, 3], metadata={"a": "6", "b": 16}),
]
)
vs.delete_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=14, operator=FilterOperator.GT),
]
)
)
assert [x.text for x in vs.get_nodes()] == ["Doc 2", "Doc 4"]
vs.clear()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform as affine_transform
from keras.src.ops.image import crop_images as crop_images
from keras.src.ops.image import elastic_transform as elastic_transform
from keras.src.ops.image import extract_patches as extract_patches
from keras.src.ops.image import gaussian_blur as gaussian_blur
from keras.src.ops.image import hsv_to_rgb as hsv_to_rgb
from keras.src.ops.image import map_coordinates as map_coordinates
from keras.src.ops.image import pad_images as pad_images
from keras.src.ops.image import perspective_transform as perspective_transform
from keras.src.ops.image import resize as resize
from keras.src.ops.image import rgb_to_grayscale as rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv as rgb_to_hsv
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import elastic_transform
from keras.src.ops.image import extract_patches
from keras.src.ops.image import gaussian_blur
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
import os
import shutil
import subprocess
import numpy as np
import PIL.Image as Image
import pytest
from jina import Document, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(uri=os.path.join(cur_dir, '..', 'imgs', 'cat.jpg'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
yield doc
@pytest.mark.parametrize(
'model_name', ['R50x1', 'R101x1', 'R50x3', 'R101x3'] # , 'R152x4']
)
@pytest.mark.parametrize('dataset', ['Imagenet1k', 'Imagenet21k'])
def test_all_models(model_name: str, dataset: str):
shutil.rmtree('pretrained', ignore_errors=True)
os.environ['TRANSFER_MODEL_NAME'] = f'{dataset}/{model_name}'
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as flow:
data = flow.post(
on='/index',
inputs=data_generator(100),
request_size=10,
return_results=True,
)
docs = data[0].docs
for doc in docs:
assert doc.embedding is not None
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
import os
import shutil
import subprocess
import numpy as np
import PIL.Image as Image
import pytest
from jina import Document, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(uri=os.path.join(cur_dir, '..', 'test_data', 'test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
yield doc
@pytest.mark.parametrize(
'model_name', ['R50x1', 'R101x1', 'R50x3', 'R101x3'] # , 'R152x4']
)
@pytest.mark.parametrize('dataset', ['Imagenet1k', 'Imagenet21k'])
def test_all_models(model_name: str, dataset: str):
shutil.rmtree('pretrained', ignore_errors=True)
os.environ['TRANSFER_MODEL_NAME'] = f'{dataset}/{model_name}'
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as flow:
data = flow.post(
on='/index',
inputs=data_generator(100),
request_size=10,
return_results=True,
)
docs = data[0].docs
for doc in docs:
assert doc.embedding is not None
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import functools
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, Series
PANDAS_INSTALLED = True
except ImportError:
DataFrame = object
Series = object
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn import __version__ as _sklearn_version
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
try:
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
# sklearn.utils Tags types can be imported unconditionally once
# xgboost's minimum scikit-learn version is 1.6 or higher
try:
from sklearn.utils import Tags as _sklearn_Tags
except ImportError:
_sklearn_Tags = object
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
class XGBModelBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.BaseEstimator."""
class XGBClassifierBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.ClassifierMixin."""
class XGBRegressorBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.RegressorMixin."""
XGBStratifiedKFold = None
_sklearn_Tags = object
_sklearn_version = object
_logger = logging.getLogger(__name__)
@functools.cache
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
@functools.cache
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
@functools.cache
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
from pandas import concat as pd_concat
return pd_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError(f"Unknown type: {type(value[0])}")
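# A minimal usage sketch of `concat` on toy inputs (not part of the original module):
# it dispatches on the type of the first element and stacks the parts row-wise.
if __name__ == "__main__":
    _parts = [np.arange(6).reshape(2, 3), np.arange(6, 12).reshape(2, 3)]
    _stacked = concat(_parts)
    # two (2, 3) arrays concatenated along axis 0 give a (4, 3) array
    assert _stacked.shape == (4, 3)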
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, Series
PANDAS_INSTALLED = True
except ImportError:
DataFrame = object
Series = object
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn import __version__ as _sklearn_version
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
try:
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
# sklearn.utils Tags types can be imported unconditionally once
# xgboost's minimum scikit-learn version is 1.6 or higher
try:
from sklearn.utils import Tags as _sklearn_Tags
except ImportError:
_sklearn_Tags = object
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
class XGBModelBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.BaseEstimator."""
class XGBClassifierBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.ClassifierMixin."""
class XGBRegressorBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.RegressorMixin."""
XGBStratifiedKFold = None
_sklearn_Tags = object
_sklearn_version = object
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
from pandas import concat as pd_concat
return pd_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError(f"Unknown type: {type(value[0])}")
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'classes':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import Optional
import torch
TORCH_VERSION = torch.__version__
def is_rocm_pytorch() -> bool:
"""Check whether the PyTorch is compiled on ROCm."""
is_rocm = False
if TORCH_VERSION != 'parrots':
try:
from torch.utils.cpp_extension import ROCM_HOME
is_rocm = True if ((torch.version.hip is not None) and
(ROCM_HOME is not None)) else False
except ImportError:
pass
return is_rocm
def _get_cuda_home() -> Optional[str]:
"""Obtain the path of CUDA home."""
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import CUDA_HOME
else:
if is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
CUDA_HOME = ROCM_HOME
else:
from torch.utils.cpp_extension import CUDA_HOME
return CUDA_HOME
def get_build_config():
"""Obtain the build information of PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.config import get_build_info
return get_build_info()
else:
return torch.__config__.show()
def _get_conv() -> tuple:
"""A wrapper to obtain base classes of Conv layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
else:
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
return _ConvNd, _ConvTransposeMixin
def _get_dataloader() -> tuple:
"""A wrapper to obtain DataLoader class from PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from torch.utils.data import DataLoader, PoolDataLoader
else:
from torch.utils.data import DataLoader
PoolDataLoader = DataLoader
return DataLoader, PoolDataLoader
def _get_extension():
"""A wrapper to obtain extension class from PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import BuildExtension, Extension
CppExtension = partial(Extension, cuda=False)
CUDAExtension = partial(Extension, cuda=True)
else:
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
return BuildExtension, CppExtension, CUDAExtension
def _get_pool() -> tuple:
"""A wrapper to obtain base classes of pooling layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
else:
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
def _get_norm() -> tuple:
"""A wrapper to obtain base classes of normalization layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
else:
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm
return _BatchNorm, _InstanceNorm, SyncBatchNorm_
_ConvNd, _ConvTransposeMixin = _get_conv()
DataLoader, PoolDataLoader = _get_dataloader()
BuildExtension, CppExtension, CUDAExtension = _get_extension()
_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm()
_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool()
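# A minimal usage sketch (assumed, not part of the original module): the private base
# classes resolved above are typically used for isinstance checks, e.g. to decide
# whether a module is a normalization layer regardless of the backend in use.
def _is_norm_layer(module) -> bool:
    # covers BatchNorm1d/2d/3d, InstanceNorm variants and the sync batch norm class
    return isinstance(module, (_BatchNorm, _InstanceNorm, SyncBatchNorm_))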
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import Optional
import torch
TORCH_VERSION = torch.__version__
def is_rocm_pytorch() -> bool:
is_rocm = False
if TORCH_VERSION != 'parrots':
try:
from torch.utils.cpp_extension import ROCM_HOME
is_rocm = True if ((torch.version.hip is not None) and
(ROCM_HOME is not None)) else False
except ImportError:
pass
return is_rocm
def _get_cuda_home() -> Optional[str]:
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import CUDA_HOME
else:
if is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
CUDA_HOME = ROCM_HOME
else:
from torch.utils.cpp_extension import CUDA_HOME
return CUDA_HOME
def get_build_config():
if TORCH_VERSION == 'parrots':
from parrots.config import get_build_info
return get_build_info()
else:
return torch.__config__.show()
def _get_conv() -> tuple:
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
else:
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
return _ConvNd, _ConvTransposeMixin
def _get_dataloader() -> tuple:
if TORCH_VERSION == 'parrots':
from torch.utils.data import DataLoader, PoolDataLoader
else:
from torch.utils.data import DataLoader
PoolDataLoader = DataLoader
return DataLoader, PoolDataLoader
def _get_extension():
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import BuildExtension, Extension
CppExtension = partial(Extension, cuda=False)
CUDAExtension = partial(Extension, cuda=True)
else:
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
return BuildExtension, CppExtension, CUDAExtension
def _get_pool() -> tuple:
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
else:
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
def _get_norm() -> tuple:
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
else:
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm
return _BatchNorm, _InstanceNorm, SyncBatchNorm_
_ConvNd, _ConvTransposeMixin = _get_conv()
DataLoader, PoolDataLoader = _get_dataloader()
BuildExtension, CppExtension, CUDAExtension = _get_extension()
_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm()
_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool()
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=6,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[190])
runner = dict(type='EpochBasedRunner', max_epochs=210)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
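# Sketch of the linear scaling rule that `auto_scale_lr` applies when enabled
# (example numbers assumed for illustration):
#   scaled_lr = base_lr * (total_batch_size / base_batch_size)
#   e.g. 8 GPUs x 6 samples/GPU = 48, so lr = 0.0005 * 48 / 96 = 0.00025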
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=6,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[190])
runner = dict(type='EpochBasedRunner', max_epochs=210)
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluatorDataFrame import (
SparseMSEEvaluatorDataFrame,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseReranking import (
SparseReranking,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseReranking",
"SparseMSEEvaluatorDataFrame",
]
# TODO: SparseMSEEvaluatorDataFrame: currently handles sparse embeddings with numpy because of
#       `trg_embeddings = np.asarray(self.embed_inputs(model, trg_sentences))` in MSEEvaluatorFromDataFrame;
#       check whether this is still needed and adapt accordingly.
# TODO: Adapt ParaphraseMiningEvaluator to support a sparse override (many functions to check, especially in utils).
# TODO: Check label accuracy (not yet clear how to adapt it) to see whether a sparse version is possible.
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
]
|
"""
This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820
TREC 2019 DL is based on the MS MARCO corpus. MS MARCO provides sparse annotations, i.e., usually only a single
passage is marked as relevant for a given query; many other highly relevant passages are not annotated and are
therefore counted as errors when a model ranks them highly.
TREC DL instead annotated up to 200 passages per query for their relevance. It is better suited to estimating
model performance on the reranking task in Information Retrieval.
Run:
python eval_cross-encoder-trec-dl.py cross-encoder-model-name
"""
import gzip
from collections import defaultdict
import logging
import tqdm
import numpy as np
import sys
import pytrec_eval
from sentence_transformers import util, CrossEncoder
import os
data_folder = "trec2019-data"
os.makedirs(data_folder, exist_ok=True)
# Read test queries
queries = {}
queries_filepath = os.path.join(data_folder, "msmarco-test2019-queries.tsv.gz")
if not os.path.exists(queries_filepath):
logging.info("Download " + os.path.basename(queries_filepath))
util.http_get(
"https://msmarco.blob.core.windows.net/msmarcoranking/msmarco-test2019-queries.tsv.gz", queries_filepath
)
with gzip.open(queries_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
# Read which passages are relevant
relevant_docs = defaultdict(lambda: defaultdict(int))
qrels_filepath = os.path.join(data_folder, "2019qrels-pass.txt")
if not os.path.exists(qrels_filepath):
logging.info("Download " + os.path.basename(qrels_filepath))
util.http_get("https://trec.nist.gov/data/deep/2019qrels-pass.txt", qrels_filepath)
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, score = line.strip().split()
score = int(score)
if score > 0:
relevant_docs[qid][pid] = score
# Only use queries that have at least one relevant passage
relevant_qid = []
for qid in queries:
if len(relevant_docs[qid]) > 0:
relevant_qid.append(qid)
# Read the top 1000 passages that are supposed to be re-ranked
passage_filepath = os.path.join(data_folder, "msmarco-passagetest2019-top1000.tsv.gz")
if not os.path.exists(passage_filepath):
logging.info("Download " + os.path.basename(passage_filepath))
util.http_get(
"https://msmarco.blob.core.windows.net/msmarcoranking/msmarco-passagetest2019-top1000.tsv.gz", passage_filepath
)
passage_cand = {}
with gzip.open(passage_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, pid, query, passage = line.strip().split("\t")
if qid not in passage_cand:
passage_cand[qid] = []
passage_cand[qid].append([pid, passage])
logging.info("Queries: {}".format(len(queries)))
queries_result_list = []
run = {}
model = CrossEncoder(sys.argv[1], max_length=512)
for qid in tqdm.tqdm(relevant_qid):
query = queries[qid]
cand = passage_cand[qid]
pids = [c[0] for c in cand]
corpus_sentences = [c[1] for c in cand]
cross_inp = [[query, sent] for sent in corpus_sentences]
    if model.config.num_labels > 1:  # Cross-Encoders that predict more than one score: apply softmax and take the score at index 1
cross_scores = model.predict(cross_inp, apply_softmax=True)[:, 1].tolist()
else:
cross_scores = model.predict(cross_inp).tolist()
cross_scores_sparse = {}
for idx, pid in enumerate(pids):
cross_scores_sparse[pid] = cross_scores[idx]
sparse_scores = cross_scores_sparse
run[qid] = {}
for pid in sparse_scores:
run[qid][pid] = float(sparse_scores[pid])
evaluator = pytrec_eval.RelevanceEvaluator(relevant_docs, {"ndcg_cut.10"})
scores = evaluator.evaluate(run)
print("Queries:", len(relevant_qid))
print("NDCG@10: {:.2f}".format(np.mean([ele["ndcg_cut_10"] for ele in scores.values()]) * 100))
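# Minimal sketch of the pytrec_eval call used above on toy data (the _toy_* names are
# illustrative, not part of the original script): qrels map qid -> {pid: gain}, a run
# maps qid -> {pid: score}, and the per-query result exposes the 'ndcg_cut_10' key.
_toy_qrels = {"q1": {"p1": 3, "p2": 0}}
_toy_run = {"q1": {"p1": 12.3, "p2": 4.2}}
_toy_scores = pytrec_eval.RelevanceEvaluator(_toy_qrels, {"ndcg_cut.10"}).evaluate(_toy_run)
print("Toy NDCG@10:", _toy_scores["q1"]["ndcg_cut_10"])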
|
"""
This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820
TREC 2019 DL is based on the MS MARCO corpus. MS MARCO provides sparse annotations, i.e., usually only a single
passage is marked as relevant for a given query; many other highly relevant passages are not annotated and are
therefore counted as errors when a model ranks them highly.
TREC DL instead annotated up to 200 passages per query for their relevance. It is better suited to estimating
model performance on the reranking task in Information Retrieval.
Run:
python eval_cross-encoder-trec-dl.py cross-encoder-model-name
"""
import gzip
from collections import defaultdict
import logging
import tqdm
import numpy as np
import sys
import pytrec_eval
from sentence_transformers import util, CrossEncoder
import os
data_folder = "trec2019-data"
os.makedirs(data_folder, exist_ok=True)
# Read test queries
queries = {}
queries_filepath = os.path.join(data_folder, "msmarco-test2019-queries.tsv.gz")
if not os.path.exists(queries_filepath):
logging.info("Download " + os.path.basename(queries_filepath))
util.http_get(
"https://msmarco.blob.core.windows.net/msmarcoranking/msmarco-test2019-queries.tsv.gz", queries_filepath
)
with gzip.open(queries_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
# Read which passages are relevant
relevant_docs = defaultdict(lambda: defaultdict(int))
qrels_filepath = os.path.join(data_folder, "2019qrels-pass.txt")
if not os.path.exists(qrels_filepath):
logging.info("Download " + os.path.basename(qrels_filepath))
util.http_get("https://trec.nist.gov/data/deep/2019qrels-pass.txt", qrels_filepath)
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, score = line.strip().split()
score = int(score)
if score > 0:
relevant_docs[qid][pid] = score
# Only use queries that have at least one relevant passage
relevant_qid = []
for qid in queries:
if len(relevant_docs[qid]) > 0:
relevant_qid.append(qid)
# Read the top 1000 passages that are supposed to be re-ranked
passage_filepath = os.path.join(data_folder, "msmarco-passagetest2019-top1000.tsv.gz")
if not os.path.exists(passage_filepath):
logging.info("Download " + os.path.basename(passage_filepath))
util.http_get(
"https://msmarco.blob.core.windows.net/msmarcoranking/msmarco-passagetest2019-top1000.tsv.gz", passage_filepath
)
passage_cand = {}
with gzip.open(passage_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, pid, query, passage = line.strip().split("\t")
if qid not in passage_cand:
passage_cand[qid] = []
passage_cand[qid].append([pid, passage])
logging.info("Queries: {}".format(len(queries)))
queries_result_list = []
run = {}
model = CrossEncoder(sys.argv[1], max_length=512)
for qid in tqdm.tqdm(relevant_qid):
query = queries[qid]
cand = passage_cand[qid]
pids = [c[0] for c in cand]
corpus_sentences = [c[1] for c in cand]
cross_inp = [[query, sent] for sent in corpus_sentences]
    if model.config.num_labels > 1:  # Cross-Encoders that predict more than one score: apply softmax and take the score at index 1
cross_scores = model.predict(cross_inp, apply_softmax=True)[:, 1].tolist()
else:
cross_scores = model.predict(cross_inp).tolist()
cross_scores_sparse = {}
for idx, pid in enumerate(pids):
cross_scores_sparse[pid] = cross_scores[idx]
sparse_scores = cross_scores_sparse
run[qid] = {}
for pid in sparse_scores:
run[qid][pid] = float(sparse_scores[pid])
evaluator = pytrec_eval.RelevanceEvaluator(relevant_docs, {"ndcg_cut.10"})
scores = evaluator.evaluate(run)
print("Queries:", len(relevant_qid))
print("NDCG@10: {:.2f}".format(np.mean([ele["ndcg_cut_10"] for ele in scores.values()]) * 100))
|
# Basic unittests to test functioning of module's top-level
__author__ = "Yaroslav Halchenko"
__license__ = "BSD"
try:
from sklearn import * # noqa: F403
_top_import_error = None
except Exception as e:
_top_import_error = e
def test_import_skl():
    # Test whether the above import failed for some reason;
    # "import *" is discouraged outside of the module level, hence we
    # rely on the variable set up above
assert _top_import_error is None
|
# Basic unittests to test functioning of module's top-level
__author__ = "Yaroslav Halchenko"
__license__ = "BSD"
try:
from sklearn import * # noqa
_top_import_error = None
except Exception as e:
_top_import_error = e
def test_import_skl():
    # Test whether the above import failed for some reason;
    # "import *" is discouraged outside of the module level, hence we
    # rely on the variable set up above
assert _top_import_error is None
|
"""Module for async requests generator."""
from typing import AsyncIterator, Optional, Dict, TYPE_CHECKING
from jina.clients.request.helper import _new_data_request_from_batch, _new_data_request
from jina.enums import DataInputType
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
from jina.types.request import Request
if TYPE_CHECKING:
from jina.clients.request import GeneratorSourceType
async def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> AsyncIterator['Request']:
"""An async :function:`request_generator`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: the data to use in the request
:param request_size: the number of Documents per request
    :param data_type: whether ``data`` is an iterator over self-contained documents, i.e. :class:`DocumentSourceType`,
        or an iterator over possible Document content (to be set as text, blob or buffer).
:param parameters: the kwargs that will be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
with ImportExtensions(required=True):
import aiostream
async for batch in aiostream.stream.chunks(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
        # must be handled here, as the gRPC channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
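if __name__ == '__main__':
    # Minimal usage sketch (assumed inputs, not part of the original module):
    # drive the async generator with an in-memory list of Documents.
    import asyncio

    from jina import Document

    async def _demo():
        docs = [Document(text='hello'), Document(text='world')]
        async for req in request_generator('/index', docs, request_size=1):
            print(req)

    asyncio.run(_demo())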
|
"""Module for async requests generator."""
from typing import AsyncIterator, Optional, Dict, TYPE_CHECKING
from jina.clients.request.helper import _new_data_request_from_batch, _new_data_request
from jina.enums import DataInputType
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
from jina.types.request import Request
if TYPE_CHECKING:
from jina.clients.request import GeneratorSourceType
async def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> AsyncIterator['Request']:
"""An async :function:`request_generator`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: the data to use in the request
:param request_size: the number of Documents per request
    :param data_type: whether ``data`` is an iterator over self-contained documents, i.e. :class:`DocumentSourceType`,
        or an iterator over possible Document content (to be set as text, blob or buffer).
:param parameters: the kwargs that will be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
with ImportExtensions(required=True):
import aiostream
async for batch in aiostream.stream.chunks(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
        # must be handled here, as the gRPC channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
|
_base_ = [
'../_base_/models/cascade-rcnn_r50_fpn.py',
'../common/lsj-200e_coco-detection.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
# disable allowed_border to avoid potential errors.
model = dict(
data_preprocessor=dict(batch_augments=batch_augments),
train_cfg=dict(rpn=dict(allowed_border=-1)))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'../common/lsj_200e_coco_detection.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
# disable allowed_border to avoid potential errors.
model = dict(
data_preprocessor=dict(batch_augments=batch_augments),
train_cfg=dict(rpn=dict(allowed_border=-1)))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import AutoConfig
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
r"""
[`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is
used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder
configs.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
kwargs (*optional*):
Dictionary of keyword arguments. Notably:
- **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the encoder config.
- **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the decoder config.
Examples:
```python
>>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
>>> # Initializing a BERT google-bert/bert-base-uncased style configuration
>>> config_encoder = BertConfig()
>>> config_decoder = BertConfig()
>>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations
>>> model = EncoderDecoderModel(config=config)
>>> # Accessing the model configuration
>>> config_encoder = model.config.encoder
>>> config_decoder = model.config.decoder
>>> # set decoder config to causal lm
>>> config_decoder.is_decoder = True
>>> config_decoder.add_cross_attention = True
>>> # Saving the model, including its configuration
>>> model.save_pretrained("my-model")
>>> # loading model and config from pretrained folder
>>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
>>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
```"""
model_type = "encoder-decoder"
sub_configs = {"encoder": AutoConfig, "decoder": AutoConfig}
has_no_defaults_at_init = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuration of type {self.model_type} cannot be instantiated because "
f"both `encoder` and `decoder` sub-configurations were not passed, only {kwargs}"
)
encoder_config = kwargs.pop("encoder")
encoder_model_type = encoder_config.pop("model_type")
decoder_config = kwargs.pop("decoder")
decoder_model_type = decoder_config.pop("model_type")
self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
self.is_encoder_decoder = True
@classmethod
def from_encoder_decoder_configs(
cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
) -> PretrainedConfig:
r"""
        Instantiate an [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
decoder model configuration.
Returns:
[`EncoderDecoderConfig`]: An instance of a configuration object
"""
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
__all__ = ["EncoderDecoderConfig"]
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import AutoConfig
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
r"""
[`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is
used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder
configs.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
kwargs (*optional*):
Dictionary of keyword arguments. Notably:
- **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the encoder config.
- **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the decoder config.
Examples:
```python
>>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
>>> # Initializing a BERT google-bert/bert-base-uncased style configuration
>>> config_encoder = BertConfig()
>>> config_decoder = BertConfig()
>>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations
>>> model = EncoderDecoderModel(config=config)
>>> # Accessing the model configuration
>>> config_encoder = model.config.encoder
>>> config_decoder = model.config.decoder
>>> # set decoder config to causal lm
>>> config_decoder.is_decoder = True
>>> config_decoder.add_cross_attention = True
>>> # Saving the model, including its configuration
>>> model.save_pretrained("my-model")
>>> # loading model and config from pretrained folder
>>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
>>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
```"""
model_type = "encoder-decoder"
sub_configs = {"encoder": AutoConfig, "decoder": AutoConfig}
has_no_defaults_at_init = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuraton of type {self.model_type} cannot be instantiated because "
f"both `encoder` and `decoder` sub-configurations were not passed, only {kwargs}"
)
encoder_config = kwargs.pop("encoder")
encoder_model_type = encoder_config.pop("model_type")
decoder_config = kwargs.pop("decoder")
decoder_model_type = decoder_config.pop("model_type")
self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
self.is_encoder_decoder = True
@classmethod
def from_encoder_decoder_configs(
cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
) -> PretrainedConfig:
r"""
        Instantiate an [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
decoder model configuration.
Returns:
[`EncoderDecoderConfig`]: An instance of a configuration object
"""
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
__all__ = ["EncoderDecoderConfig"]
|
_base_ = './ms-rcnn_r50-caffe_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
from autogpt_libs.feature_flag.client import (
initialize_launchdarkly,
shutdown_launchdarkly,
)
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
logging.getLogger("autogpt_libs").setLevel(logging.INFO)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
initialize_launchdarkly()
yield
shutdown_launchdarkly()
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='AudioTensorFlowTensor')
@_register_proto(proto_type_name='audio_tensorflow_tensor')
class AudioTensorFlowTensor(
AbstractAudioTensor, TensorFlowTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
    ---
    ```python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import AudioTensorFlowTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTensorFlowTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=tf.random.normal((1000, 2)),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='AudioTensorFlowTensor')
@_register_proto(proto_type_name='audio_tensorflow_tensor')
class AudioTensorFlowTensor(
AbstractAudioTensor, TensorFlowTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import AudioTensorFlowTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTensorFlowTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=tf.random.normal((1000, 2)),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
"""
...
|
import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize(self, value: Any) -> str: ...
@abstractmethod
def deserialize(self, value: str) -> Any: ...
class JsonSerializer(BaseSerializer):
def _serialize_value(self, value: Any) -> Any:
"""Helper to serialize a single value."""
if isinstance(value, BaseComponent):
return {
"__is_component": True,
"value": value.to_dict(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, BaseModel):
return {
"__is_pydantic": True,
"value": value.model_dump(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, dict):
return {k: self._serialize_value(v) for k, v in value.items()}
elif isinstance(value, list):
return [self._serialize_value(item) for item in value]
return value
def serialize(self, value: Any) -> str:
try:
serialized_value = self._serialize_value(value)
return json.dumps(serialized_value)
except Exception as e:
raise ValueError(f"Failed to serialize value: {type(value)}: {value!s}")
def _deserialize_value(self, data: Any) -> Any:
"""Helper to deserialize a single value."""
if isinstance(data, dict):
if data.get("__is_pydantic") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.model_validate(data["value"])
elif data.get("__is_component") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.from_dict(data["value"])
return {k: self._deserialize_value(v) for k, v in data.items()}
elif isinstance(data, list):
return [self._deserialize_value(item) for item in data]
return data
def deserialize(self, value: str) -> Any:
data = json.loads(value)
return self._deserialize_value(data)
class JsonPickleSerializer(JsonSerializer):
def serialize(self, value: Any) -> str:
"""Serialize while prioritizing JSON, falling back to Pickle."""
try:
return super().serialize(value)
except Exception:
return base64.b64encode(pickle.dumps(value)).decode("utf-8")
def deserialize(self, value: str) -> Any:
"""Deserialize while prioritizing Pickle, falling back to JSON."""
try:
return pickle.loads(base64.b64decode(value))
except Exception:
return super().deserialize(value)
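# A minimal round-trip sketch (editor's addition): serializes a nested payload
# containing a pydantic model and restores it. The ``Point`` model is an
# illustrative assumption, not part of this module.
if __name__ == "__main__":
    class Point(BaseModel):
        x: int
        y: int

    serializer = JsonPickleSerializer()
    payload = {"point": Point(x=1, y=2), "tags": ["a", "b"]}
    restored = serializer.deserialize(serializer.serialize(payload))
    print(restored)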
|
import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize(self, value: Any) -> str:
...
@abstractmethod
def deserialize(self, value: str) -> Any:
...
class JsonSerializer(BaseSerializer):
def _serialize_value(self, value: Any) -> Any:
"""Helper to serialize a single value."""
if isinstance(value, BaseComponent):
return {
"__is_component": True,
"value": value.to_dict(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, BaseModel):
return {
"__is_pydantic": True,
"value": value.model_dump(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, dict):
return {k: self._serialize_value(v) for k, v in value.items()}
elif isinstance(value, list):
return [self._serialize_value(item) for item in value]
return value
def serialize(self, value: Any) -> str:
try:
serialized_value = self._serialize_value(value)
return json.dumps(serialized_value)
except Exception as e:
raise ValueError(f"Failed to serialize value: {type(value)}: {value!s}")
def _deserialize_value(self, data: Any) -> Any:
"""Helper to deserialize a single value."""
if isinstance(data, dict):
if data.get("__is_pydantic") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.model_validate(data["value"])
elif data.get("__is_component") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.from_dict(data["value"])
return {k: self._deserialize_value(v) for k, v in data.items()}
elif isinstance(data, list):
return [self._deserialize_value(item) for item in data]
return data
def deserialize(self, value: str) -> Any:
data = json.loads(value)
return self._deserialize_value(data)
class JsonPickleSerializer(JsonSerializer):
def serialize(self, value: Any) -> str:
"""Serialize while prioritizing JSON, falling back to Pickle."""
try:
return super().serialize(value)
except Exception:
return base64.b64encode(pickle.dumps(value)).decode("utf-8")
def deserialize(self, value: str) -> Any:
"""Deserialize while prioritizing Pickle, falling back to JSON."""
try:
return pickle.loads(base64.b64decode(value))
except Exception:
return super().deserialize(value)
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine import MessageHub
class TestMessageHub:
def test_init(self):
message_hub = MessageHub('name')
assert message_hub.instance_name == 'name'
assert len(message_hub.log_buffers) == 0
assert len(message_hub.log_buffers) == 0
def test_update_log(self):
message_hub = MessageHub.get_instance('mmengine')
# test create target `LogBuffer` by name
message_hub.update_log('name', 1)
log_buffer = message_hub.log_buffers['name']
assert (log_buffer._log_history == np.array([1])).all()
# test update target `LogBuffer` by name
message_hub.update_log('name', 1)
assert (log_buffer._log_history == np.array([1, 1])).all()
# unmatched string will raise a key error
def test_update_info(self):
message_hub = MessageHub.get_instance('mmengine')
# test runtime value can be overwritten.
message_hub.update_info('key', 2)
assert message_hub.runtime_info['key'] == 2
message_hub.update_info('key', 1)
assert message_hub.runtime_info['key'] == 1
def test_get_log_buffers(self):
message_hub = MessageHub.get_instance('mmengine')
# Get undefined key will raise error
with pytest.raises(KeyError):
message_hub.get_log('unknown')
        # test that the log_buffer can be retrieved as expected
log_history = np.array([1, 2, 3, 4, 5])
count = np.array([1, 1, 1, 1, 1])
for i in range(len(log_history)):
message_hub.update_log('test_value', float(log_history[i]),
int(count[i]))
recorded_history, recorded_count = \
message_hub.get_log('test_value').data
assert (log_history == recorded_history).all()
assert (recorded_count == count).all()
def test_get_runtime(self):
message_hub = MessageHub.get_instance('mmengine')
with pytest.raises(KeyError):
message_hub.get_info('unknown')
recorded_dict = dict(a=1, b=2)
message_hub.update_info('test_value', recorded_dict)
assert message_hub.get_info('test_value') == recorded_dict
def test_get_log_vars(self):
message_hub = MessageHub.get_instance('mmengine')
log_dict = dict(
loss=1,
loss_cls=torch.tensor(2),
loss_bbox=np.array(3),
loss_iou=dict(value=1, count=2))
message_hub.update_log_vars(log_dict)
loss = message_hub.get_log('loss')
loss_cls = message_hub.get_log('loss_cls')
loss_bbox = message_hub.get_log('loss_bbox')
loss_iou = message_hub.get_log('loss_iou')
assert loss.current() == 1
assert loss_cls.current() == 2
assert loss_bbox.current() == 3
assert loss_iou.mean() == 0.5
with pytest.raises(TypeError):
loss_dict = dict(error_type=[])
message_hub.update_log_vars(loss_dict)
with pytest.raises(AssertionError):
loss_dict = dict(error_type=dict(count=1))
message_hub.update_log_vars(loss_dict)
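# A minimal usage sketch (editor's addition): records a scalar and a piece of
# runtime info on a named hub, mirroring what the tests above exercise. The
# hub name and values are illustrative.
if __name__ == '__main__':
    hub = MessageHub.get_instance('demo')
    hub.update_log('loss', 0.5)
    hub.update_info('epoch', 1)
    print(hub.get_log('loss').current(), hub.get_info('epoch'))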
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine import MessageHub
class TestMessageHub:
def test_init(self):
message_hub = MessageHub('name')
assert message_hub.instance_name == 'name'
assert len(message_hub.log_buffers) == 0
assert len(message_hub.log_buffers) == 0
def test_update_log(self):
message_hub = MessageHub.create_instance()
# test create target `LogBuffer` by name
message_hub.update_log('name', 1)
log_buffer = message_hub.log_buffers['name']
assert (log_buffer._log_history == np.array([1])).all()
# test update target `LogBuffer` by name
message_hub.update_log('name', 1)
assert (log_buffer._log_history == np.array([1, 1])).all()
# unmatched string will raise a key error
def test_update_info(self):
message_hub = MessageHub.create_instance()
# test runtime value can be overwritten.
message_hub.update_info('key', 2)
assert message_hub.runtime_info['key'] == 2
message_hub.update_info('key', 1)
assert message_hub.runtime_info['key'] == 1
def test_get_log_buffers(self):
message_hub = MessageHub.create_instance()
# Get undefined key will raise error
with pytest.raises(KeyError):
message_hub.get_log('unknown')
        # test that the log_buffer can be retrieved as expected
log_history = np.array([1, 2, 3, 4, 5])
count = np.array([1, 1, 1, 1, 1])
for i in range(len(log_history)):
message_hub.update_log('test_value', float(log_history[i]),
int(count[i]))
recorded_history, recorded_count = \
message_hub.get_log('test_value').data
assert (log_history == recorded_history).all()
assert (recorded_count == count).all()
def test_get_runtime(self):
message_hub = MessageHub.create_instance()
with pytest.raises(KeyError):
message_hub.get_info('unknown')
recorded_dict = dict(a=1, b=2)
message_hub.update_info('test_value', recorded_dict)
assert message_hub.get_info('test_value') == recorded_dict
def test_get_log_vars(self):
message_hub = MessageHub.create_instance()
log_dict = dict(
loss=1,
loss_cls=torch.tensor(2),
loss_bbox=np.array(3),
loss_iou=dict(value=1, count=2))
message_hub.update_log_vars(log_dict)
loss = message_hub.get_log('loss')
loss_cls = message_hub.get_log('loss_cls')
loss_bbox = message_hub.get_log('loss_bbox')
loss_iou = message_hub.get_log('loss_iou')
assert loss.current() == 1
assert loss_cls.current() == 2
assert loss_bbox.current() == 3
assert loss_iou.mean() == 0.5
with pytest.raises(TypeError):
loss_dict = dict(error_type=[])
message_hub.update_log_vars(loss_dict)
with pytest.raises(AssertionError):
loss_dict = dict(error_type=dict(count=1))
message_hub.update_log_vars(loss_dict)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from pathlib import Path
from .misc import is_str
def is_filepath(x):
return is_str(x) or isinstance(x, Path)
def fopen(filepath, *args, **kwargs):
if is_str(filepath):
return open(filepath, *args, **kwargs)
elif isinstance(filepath, Path):
return filepath.open(*args, **kwargs)
raise ValueError('`filepath` should be a string or a Path')
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
if not osp.isfile(filename):
raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
if os.path.lexists(dst) and overwrite:
os.remove(dst)
os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
"""Scan a directory to find the interested files.
Args:
dir_path (str | :obj:`Path`): Path of the directory.
suffix (str | tuple(str), optional): File suffix that we are
interested in. Default: None.
recursive (bool, optional): If set to True, recursively scan the
directory. Default: False.
case_sensitive (bool, optional) : If set to False, ignore the case of
suffix. Default: True.
Returns:
        A generator over the files of interest, yielded as relative paths.
"""
if isinstance(dir_path, (str, Path)):
dir_path = str(dir_path)
else:
raise TypeError('"dir_path" must be a string or Path object')
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
if suffix is not None and not case_sensitive:
suffix = suffix.lower() if isinstance(suffix, str) else tuple(
item.lower() for item in suffix)
root = dir_path
def _scandir(dir_path, suffix, recursive, case_sensitive):
for entry in os.scandir(dir_path):
if not entry.name.startswith('.') and entry.is_file():
rel_path = osp.relpath(entry.path, root)
_rel_path = rel_path if case_sensitive else rel_path.lower()
if suffix is None or _rel_path.endswith(suffix):
yield rel_path
elif recursive and os.path.isdir(entry.path):
# scan recursively if entry.path is a directory
yield from _scandir(entry.path, suffix, recursive,
case_sensitive)
return _scandir(dir_path, suffix, recursive, case_sensitive)
def find_vcs_root(path, markers=('.git', )):
"""Finds the root directory (including itself) of specified markers.
Args:
path (str): Path of directory or file.
markers (list[str], optional): List of file or directory names.
Returns:
        The directory containing one of the markers, or None if not found.
"""
if osp.isfile(path):
path = osp.dirname(path)
prev, cur = None, osp.abspath(osp.expanduser(path))
while cur != prev:
if any(osp.exists(osp.join(cur, marker)) for marker in markers):
return cur
prev, cur = cur, osp.split(cur)[0]
return None
def is_abs(path: str) -> bool:
"""Check if path is an absolute path in different backends.
Args:
path (str): path of directory or file.
Returns:
bool: whether path is an absolute path.
"""
if osp.isabs(path) or path.startswith(('http://', 'https://', 's3://')):
return True
else:
return False
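# A minimal usage sketch (editor's addition): lists Python files below the
# current directory and looks for the enclosing VCS root. The paths used here
# are illustrative.
if __name__ == '__main__':
    for rel_path in scandir('.', suffix='.py', recursive=True):
        print(rel_path)
    print('vcs root:', find_vcs_root('.'))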
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from pathlib import Path
from .misc import is_str
def is_filepath(x):
return is_str(x) or isinstance(x, Path)
def fopen(filepath, *args, **kwargs):
if is_str(filepath):
return open(filepath, *args, **kwargs)
elif isinstance(filepath, Path):
return filepath.open(*args, **kwargs)
raise ValueError('`filepath` should be a string or a Path')
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
if not osp.isfile(filename):
raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
if os.path.lexists(dst) and overwrite:
os.remove(dst)
os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
"""Scan a directory to find the interested files.
Args:
dir_path (str | :obj:`Path`): Path of the directory.
suffix (str | tuple(str), optional): File suffix that we are
interested in. Default: None.
recursive (bool, optional): If set to True, recursively scan the
directory. Default: False.
case_sensitive (bool, optional) : If set to False, ignore the case of
suffix. Default: True.
Returns:
        A generator over the files of interest, yielded as relative paths.
"""
if isinstance(dir_path, (str, Path)):
dir_path = str(dir_path)
else:
raise TypeError('"dir_path" must be a string or Path object')
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
if suffix is not None and not case_sensitive:
suffix = suffix.lower() if isinstance(suffix, str) else tuple(
item.lower() for item in suffix)
root = dir_path
def _scandir(dir_path, suffix, recursive, case_sensitive):
for entry in os.scandir(dir_path):
if not entry.name.startswith('.') and entry.is_file():
rel_path = osp.relpath(entry.path, root)
_rel_path = rel_path if case_sensitive else rel_path.lower()
if suffix is None or _rel_path.endswith(suffix):
yield rel_path
elif recursive and os.path.isdir(entry.path):
# scan recursively if entry.path is a directory
yield from _scandir(entry.path, suffix, recursive,
case_sensitive)
return _scandir(dir_path, suffix, recursive, case_sensitive)
def find_vcs_root(path, markers=('.git', )):
"""Finds the root directory (including itself) of specified markers.
Args:
path (str): Path of directory or file.
markers (list[str], optional): List of file or directory names.
Returns:
        The directory containing one of the markers, or None if not found.
"""
if osp.isfile(path):
path = osp.dirname(path)
prev, cur = None, osp.abspath(osp.expanduser(path))
while cur != prev:
if any(osp.exists(osp.join(cur, marker)) for marker in markers):
return cur
prev, cur = cur, osp.split(cur)[0]
return None
def is_abs(path: str) -> bool:
"""Check if path is an absolute path in different backends.
Args:
path (str): path of directory or file.
Returns:
bool: whether path is an absolute path.
"""
if osp.isabs(path) or path.startswith(('http', 'https', 's3')):
return True
else:
return False
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel, ValidationError
from typing_extensions import TypedDict
from docarray import BaseDoc, DocList
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import (
create_doc,
create_doc_from_dict,
create_doc_from_typeddict,
)
from docarray.typing import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDoc):
text: str
images: DocList[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocList[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocList)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDoc)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, AudioDoc)
def test_create_doc_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_doc_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_doc_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDoc)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_doc_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, AudioDoc)
def test_create_doc_from_dict():
data_dict = {
'image': ImageDoc(tensor=np.random.rand(3, 224, 224)),
'text': TextDoc(text='hello'),
'id': 123,
}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=TextDoc(text='hey'),
id=111,
)
assert isinstance(doc, BaseDoc)
assert isinstance(doc.text, TextDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.id, int)
# Create a doc with an incorrect type
with pytest.raises(ValidationError):
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=['some', 'text'], # should be TextDoc
id=111,
)
# Handle empty data_dict
with pytest.raises(ValueError):
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict={})
# Data with a None value
data_dict = {'text': 'some text', 'other': None}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc1 = MyDoc(text='txt', other=10)
doc2 = MyDoc(text='txt', other='also text')
assert isinstance(doc1, BaseDoc) and isinstance(doc2, BaseDoc)
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel, ValidationError
from typing_extensions import TypedDict
from docarray import BaseDoc, DocArray
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import (
create_doc,
create_doc_from_dict,
create_doc_from_typeddict,
)
from docarray.typing import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDoc):
text: str
images: DocArray[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocArray[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocArray)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDoc)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, AudioDoc)
def test_create_doc_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_doc_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_doc_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDoc)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_doc_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, AudioDoc)
def test_create_doc_from_dict():
data_dict = {
'image': ImageDoc(tensor=np.random.rand(3, 224, 224)),
'text': TextDoc(text='hello'),
'id': 123,
}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=TextDoc(text='hey'),
id=111,
)
assert isinstance(doc, BaseDoc)
assert isinstance(doc.text, TextDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.id, int)
# Create a doc with an incorrect type
with pytest.raises(ValidationError):
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=['some', 'text'], # should be TextDoc
id=111,
)
# Handle empty data_dict
with pytest.raises(ValueError):
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict={})
# Data with a None value
data_dict = {'text': 'some text', 'other': None}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc1 = MyDoc(text='txt', other=10)
doc2 = MyDoc(text='txt', other='also text')
assert isinstance(doc1, BaseDoc) and isinstance(doc2, BaseDoc)
|
import importlib
import os
import re
import types
from typing import Any, Optional, Literal
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
try:
import jax.numpy as jnp # type: ignore # noqa: F401
except (ImportError, TypeError):
jnp_imported = False
else:
jnp_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
'redis': '"docarray[redis]"',
'pymilvus': '"docarray[milvus]"',
}
ProtocolType = Literal[
'protobuf', 'pickle', 'json', 'json-array', 'protobuf-array', 'pickle-array'
]
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_jax_available():
return jnp_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
    return False  # reached when the item has no dtype/ndim attributes
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython`, which is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
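# A minimal usage sketch (editor's addition): optional import with an install
# hint; 'pandas' is just one of the packages listed in INSTALL_INSTRUCTIONS
# above.
if __name__ == '__main__':
    pandas = import_library('pandas', raise_error=False)
    print('pandas available:', pandas is not None)
    print('torch available:', is_torch_available())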
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
try:
import jax.numpy as jnp # type: ignore # noqa: F401
except (ImportError, TypeError):
jnp_imported = False
else:
jnp_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
'redis': '"docarray[redis]"',
'pymilvus': '"docarray[milvus]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_jax_available():
return jnp_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
    return False  # reached when the item has no dtype/ndim attributes
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython`, which is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
import pytest
from llama_index.core import MockEmbedding, StorageContext, VectorStoreIndex
from llama_index.core.llms import MockLLM
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.redis import RedisVectorStore
def test_class():
names_of_base_classes = [b.__name__ for b in RedisVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
def test_default_usage(documents, turtle_test, redis_client):
vector_store = RedisVectorStore(redis_client=redis_client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents,
embed_model=MockEmbedding(embed_dim=1536),
storage_context=storage_context,
)
# create retrievers
query_engine = index.as_query_engine(llm=MockLLM(), similarity_top_k=1)
retriever = index.as_retriever(similarity_top_k=1)
result_nodes = retriever.retrieve(turtle_test["question"])
query_res = query_engine.query(turtle_test["question"])
# test they get data
assert result_nodes[0].metadata == turtle_test["metadata"]
assert query_res.source_nodes[0].text == turtle_test["text"]
# test delete
vector_store.delete([doc.doc_id for doc in documents])
res = redis_client.ft("llama_index").search("*")
assert len(res.docs) == 0
# test delete index
vector_store.delete_index()
@pytest.mark.asyncio
async def test_async_default_usage(
documents, turtle_test, redis_client_async, redis_client
):
vector_store = RedisVectorStore(
redis_client=redis_client, redis_client_async=redis_client_async
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents,
embed_model=MockEmbedding(embed_dim=1536),
storage_context=storage_context,
)
# create retrievers
query_engine = index.as_query_engine(llm=MockLLM(), similarity_top_k=1)
retriever = index.as_retriever(similarity_top_k=1)
result_nodes = await retriever.aretrieve(turtle_test["question"])
query_res = await query_engine.aquery(turtle_test["question"])
# test they get data
assert result_nodes[0].metadata == turtle_test["metadata"]
assert query_res.source_nodes[0].text == turtle_test["text"]
# test delete
await vector_store.adelete([doc.doc_id for doc in documents])
res = await redis_client_async.ft("llama_index").search("*")
assert len(res.docs) == 0
# test delete index
await vector_store.async_delete_index()
|
from llama_index.core import MockEmbedding, StorageContext, VectorStoreIndex
from llama_index.core.llms import MockLLM
from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.redis import RedisVectorStore
def test_class():
names_of_base_classes = [b.__name__ for b in RedisVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
def test_default_usage(documents, turtle_test, redis_client):
vector_store = RedisVectorStore(redis_client=redis_client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents,
embed_model=MockEmbedding(embed_dim=1536),
storage_context=storage_context,
)
# create retrievers
query_engine = index.as_query_engine(llm=MockLLM(), similarity_top_k=1)
retriever = index.as_retriever(similarity_top_k=1)
result_nodes = retriever.retrieve(turtle_test["question"])
query_res = query_engine.query(turtle_test["question"])
# test they get data
assert result_nodes[0].metadata == turtle_test["metadata"]
assert query_res.source_nodes[0].text == turtle_test["text"]
# test delete
vector_store.delete([doc.doc_id for doc in documents])
res = redis_client.ft("llama_index").search("*")
assert len(res.docs) == 0
# test delete index
vector_store.delete_index()
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ..builder import build_shared_head
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
shared_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(BaseRoIHead, self).__init__(init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
shared_head.pretrained = pretrained
self.shared_head = build_shared_head(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self):
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self):
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def forward_train(self, x, proposal_list, data_samples, **kwargs):
"""Forward function during training."""
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
**kwargs):
"""Asynchronized test function."""
raise NotImplementedError
def simple_test(self,
x,
proposal_list,
img_meta,
proposals=None,
rescale=False,
**kwargs):
"""Test without augmentation."""
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ..builder import build_shared_head
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
shared_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(BaseRoIHead, self).__init__(init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
shared_head.pretrained = pretrained
self.shared_head = build_shared_head(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self):
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self):
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def forward_train(self,
x,
img_meta,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
**kwargs):
"""Forward function during training."""
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
**kwargs):
"""Asynchronized test function."""
raise NotImplementedError
def simple_test(self,
x,
proposal_list,
img_meta,
proposals=None,
rescale=False,
**kwargs):
"""Test without augmentation."""
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
import enum
import pathlib
from typing import Any, BinaryIO, Optional, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> list[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: tuple[tuple[str, list[str]], tuple[str, BinaryIO]]) -> dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> list[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
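# Usage sketch: instantiate the prototype datapipe defined above and pull one
# sample. The torchvision prototype datasets API is experimental, so details may
# differ by version; the local root path below is only an example, and the
# archive is fetched/decompressed via the HttpResource declared in _resources().
dtd = DTD("data/dtd", split="train", fold=1)
sample = next(iter(dtd))
print(sample["label"], sample["path"])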
|
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_additional_imports = {}
_import_structure = {"pipeline_output": ["FluxPipelineOutput", "FluxPriorReduxPipelineOutput"]}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["modeling_flux"] = ["ReduxImageEncoder"]
_import_structure["pipeline_flux"] = ["FluxPipeline"]
_import_structure["pipeline_flux_control"] = ["FluxControlPipeline"]
_import_structure["pipeline_flux_control_img2img"] = ["FluxControlImg2ImgPipeline"]
_import_structure["pipeline_flux_control_inpaint"] = ["FluxControlInpaintPipeline"]
_import_structure["pipeline_flux_controlnet"] = ["FluxControlNetPipeline"]
_import_structure["pipeline_flux_controlnet_image_to_image"] = ["FluxControlNetImg2ImgPipeline"]
_import_structure["pipeline_flux_controlnet_inpainting"] = ["FluxControlNetInpaintPipeline"]
_import_structure["pipeline_flux_fill"] = ["FluxFillPipeline"]
_import_structure["pipeline_flux_img2img"] = ["FluxImg2ImgPipeline"]
_import_structure["pipeline_flux_inpaint"] = ["FluxInpaintPipeline"]
_import_structure["pipeline_flux_kontext"] = ["FluxKontextPipeline"]
_import_structure["pipeline_flux_prior_redux"] = ["FluxPriorReduxPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .modeling_flux import ReduxImageEncoder
from .pipeline_flux import FluxPipeline
from .pipeline_flux_control import FluxControlPipeline
from .pipeline_flux_control_img2img import FluxControlImg2ImgPipeline
from .pipeline_flux_control_inpaint import FluxControlInpaintPipeline
from .pipeline_flux_controlnet import FluxControlNetPipeline
from .pipeline_flux_controlnet_image_to_image import FluxControlNetImg2ImgPipeline
from .pipeline_flux_controlnet_inpainting import FluxControlNetInpaintPipeline
from .pipeline_flux_fill import FluxFillPipeline
from .pipeline_flux_img2img import FluxImg2ImgPipeline
from .pipeline_flux_inpaint import FluxInpaintPipeline
from .pipeline_flux_kontext import FluxKontextPipeline
from .pipeline_flux_prior_redux import FluxPriorReduxPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
for name, value in _additional_imports.items():
setattr(sys.modules[__name__], name, value)
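# Usage sketch: with the lazy-module setup above, the heavy submodule is only
# imported when the attribute is first accessed. Loading real weights (commented
# out) would additionally require torch, transformers and a checkpoint; the
# model id shown is just an illustrative example, not a requirement of this module.
from diffusers.pipelines.flux import FluxPipeline  # resolved through _LazyModule

# pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")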
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_additional_imports = {}
_import_structure = {"pipeline_output": ["FluxPipelineOutput", "FluxPriorReduxPipelineOutput"]}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["modeling_flux"] = ["ReduxImageEncoder"]
_import_structure["pipeline_flux"] = ["FluxPipeline"]
_import_structure["pipeline_flux_control"] = ["FluxControlPipeline"]
_import_structure["pipeline_flux_control_img2img"] = ["FluxControlImg2ImgPipeline"]
_import_structure["pipeline_flux_control_inpaint"] = ["FluxControlInpaintPipeline"]
_import_structure["pipeline_flux_controlnet"] = ["FluxControlNetPipeline"]
_import_structure["pipeline_flux_controlnet_image_to_image"] = ["FluxControlNetImg2ImgPipeline"]
_import_structure["pipeline_flux_controlnet_inpainting"] = ["FluxControlNetInpaintPipeline"]
_import_structure["pipeline_flux_fill"] = ["FluxFillPipeline"]
_import_structure["pipeline_flux_img2img"] = ["FluxImg2ImgPipeline"]
_import_structure["pipeline_flux_inpaint"] = ["FluxInpaintPipeline"]
_import_structure["pipeline_flux_prior_redux"] = ["FluxPriorReduxPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .modeling_flux import ReduxImageEncoder
from .pipeline_flux import FluxPipeline
from .pipeline_flux_control import FluxControlPipeline
from .pipeline_flux_control_img2img import FluxControlImg2ImgPipeline
from .pipeline_flux_control_inpaint import FluxControlInpaintPipeline
from .pipeline_flux_controlnet import FluxControlNetPipeline
from .pipeline_flux_controlnet_image_to_image import FluxControlNetImg2ImgPipeline
from .pipeline_flux_controlnet_inpainting import FluxControlNetInpaintPipeline
from .pipeline_flux_fill import FluxFillPipeline
from .pipeline_flux_img2img import FluxImg2ImgPipeline
from .pipeline_flux_inpaint import FluxInpaintPipeline
from .pipeline_flux_prior_redux import FluxPriorReduxPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
for name, value in _additional_imports.items():
setattr(sys.modules[__name__], name, value)
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings, labels)
def compute_loss_from_embeddings(self, embeddings: list[Tensor], labels: Tensor) -> Tensor:
"""
Compute the CosineSimilarity loss from embeddings.
Args:
embeddings: List of embeddings
labels: Labels indicating the similarity scores of the pairs
Returns:
Loss value
"""
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [`TextUrl`][docarray.typing.url.TextUrl] (`TextDoc.url`)
- a `str` (`TextDoc.text`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`TextDoc.embedding`)
- a `bytes` object (`TextDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='https://www.gutenberg.org/files/1065/1065-0.txt')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
```
You can initialize directly from a string:
```python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
```
You can extend this Document:
```python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='https://www.gutenberg.org/files/1065/1065-0.txt')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
# txt_doc.second_embedding = model(txt_doc.text)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text_doc=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image_doc.tensor = mmdoc.image_doc.url.load()
# or
mmdoc.image_doc.bytes_ = mmdoc.image_doc.url.load_bytes()
mmdoc.image_doc.tensor = mmdoc.image_doc.bytes_.load()
```
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
excluding `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
```python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com/file')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com/file')
doc == 'This is the main text' # True
doc == doc2 # True
```
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as a `str`.
:param item: A string to be checked as a substring of the `text` attribute
:return: A boolean indicating whether `item` is a substring of `text`
```python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
```
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [`TextUrl`][docarray.typing.url.TextUrl] (`TextDoc.url`)
- a `str` (`TextDoc.text`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`TextDoc.embedding`)
- a `bytes` object (`TextDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
```
You can initialize directly from a string:
```python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
```
You can extend this Document:
```python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
# txt_doc.second_embedding = model(txt_doc.text)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text_doc=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image_doc.tensor = mmdoc.image_doc.url.load()
# or
mmdoc.image_doc.bytes_ = mmdoc.image_doc.url.load_bytes()
mmdoc.image_doc.tensor = mmdoc.image_doc.bytes_.load()
```
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
excluding `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
```python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # True
```
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as a `str`.
:param item: A string to be checked as a substring of the `text` attribute
:return: A boolean indicating whether `item` is a substring of `text`
```python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
```
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from types import SimpleNamespace
from unittest.mock import patch
import pytest
from llama_index.core.base.llms.types import (
CompletionResponse,
ChatMessage,
ChatResponse,
)
from llama_index.llms.dashscope.base import DashScope
class FakeDashscopeResponse:
def __init__(self, data: dict):
self.status_code = data["status_code"]
self.output = SimpleNamespace(**data["output"])
def __repr__(self) -> str:
return f"<FakeDashscopeResponse status_code={self.status_code}>"
@pytest.fixture()
def dashscope_llm():
return DashScope(api_key="test")
@pytest.fixture()
def dashscope_api_response():
return {
"status_code": 200,
"request_id": "4438deec-2d21-9b9c-b405-a47459fd8f75",
"code": "",
"message": "",
"output": {
"choices": [
{
"finish_reason": "stop",
"message": {"role": "assistant", "content": "hi, there!"},
}
]
},
"usage": {"total_tokens": 161, "output_tokens": 91, "input_tokens": 70},
}
@pytest.fixture()
def prompt() -> str:
return "hi, there!"
@patch("llama_index.llms.dashscope.base.call_with_messages")
def test_dashscope_complete(
mock_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
mock_call_with_messages.return_value = dashscope_api_response
response = dashscope_llm.complete(prompt)
assert isinstance(response, CompletionResponse)
assert response.text == "hi, there!"
@patch("llama_index.llms.dashscope.base.call_with_messages")
def test_dashscope_chat(
mock_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
mock_call_with_messages.return_value = dashscope_api_response
response = dashscope_llm.chat(messages=[ChatMessage.from_str(prompt)])
assert isinstance(response, ChatResponse)
assert response.message.content == "hi, there!"
@pytest.mark.asyncio()
@patch("llama_index.llms.dashscope.base.astream_call_with_messages")
async def test_dashscope_astream_complete(
mock_astream_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
async def async_response_generator():
yield FakeDashscopeResponse(dashscope_api_response)
mock_astream_call_with_messages.return_value = async_response_generator()
responses = []
gen = await dashscope_llm.astream_complete(prompt)  # await first to obtain the async generator
async for partial_resp in gen:
responses.append(partial_resp)
assert len(responses) == 1
assert isinstance(responses[0], CompletionResponse)
assert responses[0].text == "hi, there!"
assert responses[0].delta == "hi, there!"
@pytest.mark.asyncio()
@patch("llama_index.llms.dashscope.base.astream_call_with_messages")
async def test_dashscope_astream_chat(
mock_astream_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
async def async_response_generator():
yield FakeDashscopeResponse(dashscope_api_response)
mock_astream_call_with_messages.return_value = async_response_generator()
responses = []
gen = await dashscope_llm.astream_chat(messages=[ChatMessage.from_str(prompt)])
async for partial_chat_resp in gen:
responses.append(partial_chat_resp)
assert len(responses) == 1
assert isinstance(responses[0], ChatResponse)
assert responses[0].message.content == "hi, there!"
assert responses[0].delta == "hi, there!"
assert responses[0].message.role == "assistant"
|
from unittest.mock import patch
import pytest
from llama_index.core.base.llms.types import (
CompletionResponse,
ChatMessage,
ChatResponse,
)
from llama_index.llms.dashscope.base import DashScope
@pytest.fixture()
def dashscope_llm():
return DashScope(api_key="test")
@pytest.fixture()
def dashscope_api_response():
return {
"status_code": 200,
"request_id": "4438deec-2d21-9b9c-b405-a47459fd8f75",
"code": "",
"message": "",
"output": {
"choices": [
{
"finish_reason": "stop",
"message": {"role": "assistant", "content": "hi, there!"},
}
]
},
"usage": {"total_tokens": 161, "output_tokens": 91, "input_tokens": 70},
}
@pytest.fixture()
def prompt() -> str:
return "hi, there!"
@patch("llama_index.llms.dashscope.base.call_with_messages")
def test_dashscope_complete(
mock_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
mock_call_with_messages.return_value = dashscope_api_response
response = dashscope_llm.complete(prompt)
assert isinstance(response, CompletionResponse)
assert response.text == "hi, there!"
@patch("llama_index.llms.dashscope.base.call_with_messages")
def test_dashscope_chat(
mock_call_with_messages, dashscope_llm, dashscope_api_response, prompt
):
mock_call_with_messages.return_value = dashscope_api_response
response = dashscope_llm.chat(messages=[ChatMessage.from_str(prompt)])
assert isinstance(response, ChatResponse)
assert response.message.content == "hi, there!"
|
import warnings
from typing import List, Optional, TypeVar
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import VIDEO_MIMETYPE
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='VideoUrl')
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def mime_type(cls) -> str:
return VIDEO_MIMETYPE
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return []
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a `NamedTuple` of
[`VideoNdArray`][docarray.typing.VideoNdArray],
[`AudioNdArray`][docarray.typing.AudioNdArray]
and [`NdArray`][docarray.typing.NdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray] = None
audio: Optional[AudioNdArray] = None
key_frame_indices: Optional[NdArray] = None
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: [`AudioNdArray`][docarray.typing.AudioNdArray] representing the audio content,
[`VideoNdArray`][docarray.typing.VideoNdArray] representing the images of the video,
[`NdArray`][docarray.typing.NdArray] of the key frame indices.
"""
buffer = self.load_bytes(**kwargs)
return buffer.load()
def load_bytes(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to [`VideoBytes`][docarray.typing.VideoBytes]. This will either load or download
the file and save it into an [`VideoBytes`][docarray.typing.VideoBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`VideoBytes`][docarray.typing.VideoBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = self.startswith('http')
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
import warnings
from typing import List, Optional, TypeVar
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import VIDEO_MIMETYPE
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='VideoUrl')
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def mime_type(cls) -> str:
return VIDEO_MIMETYPE
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return []
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a `NamedTuple` of
[`VideoNdArray`][docarray.typing.VideoNdArray],
[`AudioNdArray`][docarray.typing.AudioNdArray]
and [`NdArray`][docarray.typing.NdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: [`AudioNdArray`][docarray.typing.AudioNdArray] representing the audio content,
[`VideoNdArray`][docarray.typing.VideoNdArray] representing the images of the video,
[`NdArray`][docarray.typing.NdArray] of the key frame indices.
"""
buffer = self.load_bytes(**kwargs)
return buffer.load()
def load_bytes(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to [`VideoBytes`][docarray.typing.VideoBytes]. This will either load or download
the file and save it into an [`VideoBytes`][docarray.typing.VideoBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`VideoBytes`][docarray.typing.VideoBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = self.startswith('http')
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.6'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096; useful for running matplotlib/seaborn with
parallel plot generators against the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256.
This is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
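# Usage sketch: JINA_MP_START_METHOD is read at import time (see above), so it
# must be set in the environment before `import jina` runs in the process.
# The value 'spawn' below is just an example choice.
import os

os.environ['JINA_MP_START_METHOD'] = 'spawn'  # one of fork / spawn / forkserver

import jina  # noqa: E402 - imported only after the env var is in place
print(jina.__version__)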
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.5'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096; useful for running matplotlib/seaborn with
parallel plot generators against the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256.
This is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from torchaudio_unittest.models.rnnt.rnnt_test_impl import RNNTTestImpl
@skipIfNoCuda
class RNNTFloat32GPUTest(RNNTTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class RNNTFloat64GPUTest(RNNTTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
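# Sketch only: CPU counterparts would follow the same pattern, dropping the
# skipIfNoCuda decorator and targeting torch.device("cpu") while reusing the
# RNNTTestImpl mixin; shown here purely as an illustration of the test layout,
# not as torchaudio's actual CPU test module.
class RNNTFloat32CPUTest(RNNTTestImpl, PytorchTestCase):
    dtype = torch.float32
    device = torch.device("cpu")

class RNNTFloat64CPUTest(RNNTTestImpl, PytorchTestCase):
    dtype = torch.float64
    device = torch.device("cpu")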
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from torchaudio_unittest.models.rnnt.rnnt_test_impl import RNNTTestImpl
@skipIfNoCuda
class RNNTFloat32GPUTest(RNNTTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class RNNTFloat64GPUTest(RNNTTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
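# Usage sketch: inspecting this config with mmengine, which resolves the
# `_base_` inheritance chain. The file path below is an assumed example location
# inside an MMDetection checkout.
from mmengine.config import Config

cfg = Config.fromfile('configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
print(cfg.model.panoptic_head.num_things_classes)  # 80
print(cfg.train_pipeline[3]['type'])               # 'RandomResize'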
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
from typing import Union
from google.oauth2.service_account import Credentials # type: ignore
from google.cloud import aiplatform, storage
from google.cloud.aiplatform import telemetry
from google.cloud.aiplatform.matching_engine import (
MatchingEngineIndex,
MatchingEngineIndexEndpoint,
)
from llama_index.vector_stores.vertexaivectorsearch.utils import (
get_client_info,
get_user_agent,
)
class VectorSearchSDKManager:
"""
Class in charge of building all Google Cloud SDK Objects needed to build
VectorStores from project_id, credentials or other specifications. Abstracts
away the authentication layer.
"""
def __init__(
self,
*,
project_id: str,
region: str,
credentials: Union[Credentials, None] = None,
credentials_path: Union[str, None] = None,
) -> None:
"""
Constructor.
If `credentials` is provided, those credentials are used. If not provided
`credentials_path` is used to retrieve credentials from a file. If also not
provided, falls back to default credentials.
Args:
project_id: Id of the project.
region: Region of the project, e.g. 'us-central1'.
credentials: Google cloud Credentials object.
credentials_path: Google Cloud Credentials json file path.
"""
self._project_id = project_id
self._region = region
if credentials is not None:
self._credentials = credentials
elif credentials_path is not None:
self._credentials = Credentials.from_service_account_file(credentials_path)
else:
self._credentials = None
self.initialize_aiplatform()
def initialize_aiplatform(self) -> None:
"""Initializes aiplatform."""
aiplatform.init(
project=self._project_id,
location=self._region,
credentials=self._credentials,
)
def get_gcs_client(self) -> storage.Client:
"""
Retrieves a Google Cloud Storage client.
Returns:
Google Cloud Storage Agent.
"""
return storage.Client(
project=self._project_id,
credentials=self._credentials,
client_info=get_client_info(
module="llama-index-vector-stores-vertexaivectorsearch"
),
)
def get_gcs_bucket(self, bucket_name: str) -> storage.Bucket:
"""
Retrieves a Google Cloud Bucket by bucket name.
Args:
bucket_name: Name of the bucket to be retrieved.
Returns:
Google Cloud Bucket.
"""
client = self.get_gcs_client()
return client.get_bucket(bucket_name)
def get_index(self, index_id: str) -> MatchingEngineIndex:
"""
Retrieves a MatchingEngineIndex (VectorSearchIndex) by id.
Args:
index_id: Id of the index to be retrieved.
Returns:
MatchingEngineIndex instance.
"""
_, user_agent = get_user_agent("llama-index-vector-stores-vertexaivectorsearch")
with telemetry.tool_context_manager(user_agent):
return MatchingEngineIndex(
index_name=index_id,
project=self._project_id,
location=self._region,
credentials=self._credentials,
)
def get_endpoint(self, endpoint_id: str) -> MatchingEngineIndexEndpoint:
"""
Retrieves a MatchingEngineIndexEndpoint (VectorSearchIndexEndpoint) by id.
Args:
endpoint_id: Id of the endpoint to be retrieved.
Returns:
MatchingEngineIndexEndpoint instance.
"""
_, user_agent = get_user_agent("llama-index-vector-stores-vertexaivectorsearch")
with telemetry.tool_context_manager(user_agent):
return MatchingEngineIndexEndpoint(
index_endpoint_name=endpoint_id,
project=self._project_id,
location=self._region,
credentials=self._credentials,
)
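# Usage sketch: the project id, region, credentials path and resource ids below
# are placeholders; valid Google Cloud credentials and existing Vector Search
# resources are assumed.
sdk_manager = VectorSearchSDKManager(
    project_id="my-gcp-project",
    region="us-central1",
    credentials_path="service-account.json",
)
bucket = sdk_manager.get_gcs_bucket("my-staging-bucket")
index = sdk_manager.get_index("1234567890123456789")
endpoint = sdk_manager.get_endpoint("9876543210987654321")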
|
from typing import Union
from google.oauth2.service_account import Credentials # type: ignore
from google.cloud import aiplatform, storage
from google.cloud.aiplatform import telemetry
from google.cloud.aiplatform.matching_engine import (
MatchingEngineIndex,
MatchingEngineIndexEndpoint,
)
from llama_index.vector_stores.vertexaivectorsearch.utils import (
get_client_info,
get_user_agent,
)
class VectorSearchSDKManager:
"""Class in charge of building all Google Cloud SDK Objects needed to build
VectorStores from project_id, credentials or other specifications. Abstracts
away the authentication layer.
"""
def __init__(
self,
*,
project_id: str,
region: str,
credentials: Union[Credentials, None] = None,
credentials_path: Union[str, None] = None,
) -> None:
"""Constructor.
If `credentials` is provided, those credentials are used. If not provided
`credentials_path` is used to retrieve credentials from a file. If also not
provided, falls back to default credentials.
Args:
project_id: Id of the project.
region: Region of the project, e.g. 'us-central1'.
credentials: Google cloud Credentials object.
credentials_path: Google Cloud Credentials json file path.
"""
self._project_id = project_id
self._region = region
if credentials is not None:
self._credentials = credentials
elif credentials_path is not None:
self._credentials = Credentials.from_service_account_file(credentials_path)
else:
self._credentials = None
self.initialize_aiplatform()
def initialize_aiplatform(self) -> None:
"""Initializes aiplatform."""
aiplatform.init(
project=self._project_id,
location=self._region,
credentials=self._credentials,
)
def get_gcs_client(self) -> storage.Client:
"""Retrieves a Google Cloud Storage client.
Returns:
Google Cloud Storage client.
"""
return storage.Client(
project=self._project_id,
credentials=self._credentials,
client_info=get_client_info(
module="llama-index-vector-stores-vertexaivectorsearch"
),
)
def get_gcs_bucket(self, bucket_name: str) -> storage.Bucket:
"""Retrieves a Google Cloud Bucket by bucket name.
Args:
bucket_name: Name of the bucket to be retrieved.
Returns:
Google Cloud Bucket.
"""
client = self.get_gcs_client()
return client.get_bucket(bucket_name)
def get_index(self, index_id: str) -> MatchingEngineIndex:
"""Retrieves a MatchingEngineIndex (VectorSearchIndex) by id.
Args:
index_id: Id of the index to be retrieved.
Returns:
MatchingEngineIndex instance.
"""
_, user_agent = get_user_agent("llama-index-vector-stores-vertexaivectorsearch")
with telemetry.tool_context_manager(user_agent):
return MatchingEngineIndex(
index_name=index_id,
project=self._project_id,
location=self._region,
credentials=self._credentials,
)
def get_endpoint(self, endpoint_id: str) -> MatchingEngineIndexEndpoint:
"""Retrieves a MatchingEngineIndexEndpoint (VectorSearchIndexEndpoint) by id.
Args:
endpoint_id: Id of the endpoint to be retrieved.
Returns:
MatchingEngineIndexEndpoint instance.
"""
_, user_agent = get_user_agent("llama-index-vector-stores-vertexaivectorsearch")
with telemetry.tool_context_manager(user_agent):
return MatchingEngineIndexEndpoint(
index_endpoint_name=endpoint_id,
project=self._project_id,
location=self._region,
credentials=self._credentials,
)
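# A minimal usage sketch (assumption: the project, region, bucket name and resource
# ids below are placeholders, not real resources). When no credentials are passed,
# the manager falls back to default credentials.
if __name__ == "__main__":
    sdk_manager = VectorSearchSDKManager(
        project_id="my-gcp-project",  # placeholder project id
        region="us-central1",  # placeholder region
    )
    bucket = sdk_manager.get_gcs_bucket("my-staging-bucket")  # placeholder bucket name
    index = sdk_manager.get_index("1234567890")  # placeholder index id
    endpoint = sdk_manager.get_endpoint("0987654321")  # placeholder endpoint id
    print(bucket.name, index.resource_name, endpoint.resource_name)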
|
from __future__ import annotations
import pytest
from torch import Tensor
from sentence_transformers import SparseEncoder
@pytest.mark.parametrize(
"model_name",
[
("sentence-transformers/all-MiniLM-L6-v2"),
],
)
def test_load_and_encode(model_name: str) -> None:
# Ensure that SparseEncoder can be initialized with a base model and can encode
try:
model = SparseEncoder(model_name)
except Exception as e:
pytest.fail(f"Failed to load SparseEncoder with {model_name}: {e}")
sentences = [
"This is a test sentence.",
"Another example sentence here.",
"Sparse encoders are interesting.",
]
try:
embeddings = model.encode(sentences)
except Exception as e:
pytest.fail(f"SparseEncoder failed to encode sentences: {e}")
assert embeddings is not None
assert isinstance(embeddings, Tensor), "Embeddings should be a tensor for sparse encoders"
assert len(embeddings) == len(sentences), "Number of embeddings should match number of sentences"
decoded_embeddings = model.decode(embeddings)
assert len(decoded_embeddings) == len(sentences), "Decoded embeddings should match number of sentences"
assert all(isinstance(emb, list) for emb in decoded_embeddings), "Decoded embeddings should be a list of lists"
# Check a known property: encoding a single sentence
single_sentence_emb = model.encode(["A single sentence."], convert_to_tensor=False)
assert isinstance(
single_sentence_emb, list
), "Encoding a single sentence with convert_to_tensor=False should return a list of len 1"
assert len(single_sentence_emb) == 1, "Encoding a list with one sentence should return exactly one embedding"
# If we're using a string instead of a list, we should get a single tensor embedding
single_sentence_emb_tensor = model.encode("A single sentence.", convert_to_tensor=False)
assert isinstance(
single_sentence_emb_tensor, Tensor
), "Encoding a single sentence with convert_to_tensor=False should return a tensor"
assert single_sentence_emb_tensor.dim() == 1, "Single sentence embedding tensor should be 1D"
# Check encoding with show_progress_bar
try:
embeddings_with_progress = model.encode(sentences, show_progress_bar=True)
assert len(embeddings_with_progress) == len(sentences)
except Exception as e:
pytest.fail(f"SparseEncoder failed to encode with progress bar: {e}")
|
from __future__ import annotations
import pytest
from torch import Tensor
from sentence_transformers import SparseEncoder
@pytest.mark.parametrize(
"model_name",
[
("sentence-transformers/all-MiniLM-L6-v2"),
],
)
def test_load_and_encode(model_name: str) -> None:
# Ensure that SparseEncoder can be initialized with a base model and can encode
try:
model = SparseEncoder(model_name)
except Exception as e:
pytest.fail(f"Failed to load SparseEncoder with {model_name}: {e}")
sentences = [
"This is a test sentence.",
"Another example sentence here.",
"Sparse encoders are interesting.",
]
try:
embeddings = model.encode(sentences)
except Exception as e:
pytest.fail(f"SparseEncoder failed to encode sentences: {e}")
assert embeddings is not None
assert isinstance(embeddings, Tensor), "Embeddings should be a tensor for sparse encoders"
assert len(embeddings) == len(sentences), "Number of embeddings should match number of sentences"
decoded_embeddings = model.decode(embeddings)
assert len(decoded_embeddings) == len(sentences), "Decoded embeddings should match number of sentences"
assert all(isinstance(emb, list) for emb in decoded_embeddings), "Decoded embeddings should be a list of lists"
# Check a known property: encoding a single sentence
single_sentence_emb = model.encode("A single sentence.", convert_to_tensor=False)
assert isinstance(
single_sentence_emb, list
), "Encoding a single sentence with convert_to_tensor=False should return a list of len 1"
assert len(single_sentence_emb) > 0, "Single sentence embedding should not be empty"
# Check encoding with show_progress_bar
try:
embeddings_with_progress = model.encode(sentences, show_progress_bar=True)
assert len(embeddings_with_progress) == len(sentences)
except Exception as e:
pytest.fail(f"SparseEncoder failed to encode with progress bar: {e}")
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.model_card import generate_model_card
from sentence_transformers.util import is_datasets_available, is_training_available
if is_datasets_available():
from datasets import Dataset, DatasetDict
if not is_training_available():
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture(scope="session")
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor 1", "anchor 2", ..., "anchor 10"],
"positive": ["positive 1", "positive 2", ..., "positive 10"],
"negative": ["negative 1", "negative 2", ..., "negative 10"],
}
"""
return Dataset.from_dict(
{
"anchor": [f"anchor {i}" for i in range(1, 11)],
"positive": [f"positive {i}" for i in range(1, 11)],
"negative": [f"negative {i}" for i in range(1, 11)],
}
)
@pytest.mark.parametrize(
("num_datasets", "expected_substrings"),
[
# 0 actually refers to just a single dataset
(
0,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors).",
"**Maximum Sequence Length:** 512 tokens",
"**Output Dimensionality:** 128 dimensions",
"**Similarity Function:** Cosine Similarity",
"#### Unnamed Dataset",
" | <code>anchor 1</code> | <code>positive 1</code> | <code>negative 1</code> |",
"* Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:",
],
),
(
1,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0 dataset.",
"#### train_0",
],
),
(
2,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0 and train_1 datasets.",
"#### train_0",
"#### train_1",
],
),
(
10,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0, train_1, train_2, train_3, train_4, train_5, train_6, train_7, train_8 and train_9 datasets.",
"<details><summary>train_0</summary>", # We start using <details><summary> if we have more than 3 datasets
"#### train_0",
"</details>\n<details><summary>train_9</summary>",
"#### train_9",
],
),
# We start using "50 datasets" when the ", "-joined dataset name exceed 200 characters
(
50,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on 50 datasets.",
"<details><summary>train_0</summary>",
"#### train_0",
"</details>\n<details><summary>train_49</summary>",
"#### train_49",
],
),
],
)
def test_model_card_base(
stsb_bert_tiny_model: SentenceTransformer,
dummy_dataset: Dataset,
num_datasets: int,
expected_substrings: list[str],
) -> None:
model = stsb_bert_tiny_model
train_dataset = dummy_dataset
if num_datasets:
train_dataset = DatasetDict({f"train_{i}": train_dataset for i in range(num_datasets)})
# This adds data to model.model_card_data
SentenceTransformerTrainer(
model,
train_dataset=train_dataset,
)
model_card = generate_model_card(model)
# For debugging purposes, we save the model card to a file
# with open(f"test_model_card_{num_datasets}.md", "w", encoding="utf8") as f:
# f.write(model_card)
for substring in expected_substrings:
assert substring in model_card
# We don't want to have two consecutive empty lines anywhere
assert "\n\n\n" not in model_card
|
from __future__ import annotations
import pytest
from datasets import Dataset, DatasetDict
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.model_card import generate_model_card
@pytest.fixture(scope="session")
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor 1", "anchor 2", ..., "anchor 10"],
"positive": ["positive 1", "positive 2", ..., "positive 10"],
"negative": ["negative 1", "negative 2", ..., "negative 10"],
}
"""
return Dataset.from_dict(
{
"anchor": [f"anchor {i}" for i in range(1, 11)],
"positive": [f"positive {i}" for i in range(1, 11)],
"negative": [f"negative {i}" for i in range(1, 11)],
}
)
@pytest.mark.parametrize(
("num_datasets", "expected_substrings"),
[
# 0 actually refers to just a single dataset
(
0,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors).",
"**Maximum Sequence Length:** 512 tokens",
"**Output Dimensionality:** 128 dimensions",
"**Similarity Function:** Cosine Similarity",
"#### Unnamed Dataset",
" | <code>anchor 1</code> | <code>positive 1</code> | <code>negative 1</code> |",
"* Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:",
],
),
(
1,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0 dataset.",
"#### train_0",
],
),
(
2,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0 and train_1 datasets.",
"#### train_0",
"#### train_1",
],
),
(
10,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0, train_1, train_2, train_3, train_4, train_5, train_6, train_7, train_8 and train_9 datasets.",
"<details><summary>train_0</summary>", # We start using <details><summary> if we have more than 3 datasets
"#### train_0",
"</details>\n<details><summary>train_9</summary>",
"#### train_9",
],
),
# We start using "50 datasets" when the ", "-joined dataset name exceed 200 characters
(
50,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on 50 datasets.",
"<details><summary>train_0</summary>",
"#### train_0",
"</details>\n<details><summary>train_49</summary>",
"#### train_49",
],
),
],
)
def test_model_card_base(
stsb_bert_tiny_model: SentenceTransformer,
dummy_dataset: Dataset,
num_datasets: int,
expected_substrings: list[str],
) -> None:
model = stsb_bert_tiny_model
train_dataset = dummy_dataset
if num_datasets:
train_dataset = DatasetDict({f"train_{i}": train_dataset for i in range(num_datasets)})
# This adds data to model.model_card_data
SentenceTransformerTrainer(
model,
train_dataset=train_dataset,
)
model_card = generate_model_card(model)
# For debugging purposes, we save the model card to a file
# with open(f"test_model_card_{num_datasets}.md", "w", encoding="utf8") as f:
# f.write(model_card)
for substring in expected_substrings:
assert substring in model_card
# We don't want to have two consecutive empty lines anywhere
assert "\n\n\n" not in model_card
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Anchor Sparsity: Active Dimensions: 105.5, Sparsity Ratio: 0.9965
Model Positive Sparsity: Active Dimensions: 69.8, Sparsity Ratio: 0.9977
Model Negative Sparsity: Active Dimensions: 68.6, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model, ".")
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Sparsity Stats Query : Row Non-Zero Mean: 105.4530029296875, Row Sparsity Mean: 0.9965449571609497
Model Sparsity Stats Corpus : Row Non-Zero Mean: 69.18349838256836, Row Sparsity Mean: 0.9977333247661591
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import subprocess
import pytest
from clip_text import CLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"cuda"',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from clip_text import CLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: list[str] # definition at the bottom of the script
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
".3gp",
".3g2",
".avi",
".asf",
".flv",
".mp4",
".mov",
".m4v",
".mkv",
".mpg",
".webm",
".f4v",
".wmv",
".wma",
".ogg",
".ogm",
".mxf",
".nut",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: list[str] # definition at the bottom of the script
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
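# A typical way to exercise this builder (illustrative only; the `data_dir` value is
# a placeholder for a local folder of audio files, optionally with a metadata.csv):
# ```
# from datasets import load_dataset
#
# dataset = load_dataset("audiofolder", data_dir="path/to/audio_folder")
# ```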
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .IDF import IDF
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
__all__ = ["CSRSparsity", "MLMTransformer", "SpladePooling", "IDF"]
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
__all__ = ["CSRSparsity", "MLMTransformer", "SpladePooling"]
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import (
ChatModelIntegrationTests,
)
from langchain_groq import ChatGroq
rate_limiter = InMemoryRateLimiter(requests_per_second=0.2)
class BaseTestGroq(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatGroq
@pytest.mark.xfail(reason="Not yet implemented.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@property
def supports_json_mode(self) -> bool:
return True
class TestGroqGemma(BaseTestGroq):
@property
def chat_model_params(self) -> dict:
return {"model": "gemma2-9b-it", "rate_limiter": rate_limiter}
@property
def supports_json_mode(self) -> bool:
return True
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import (
ChatModelIntegrationTests,
)
from langchain_groq import ChatGroq
rate_limiter = InMemoryRateLimiter(requests_per_second=0.2)
class BaseTestGroq(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatGroq
@pytest.mark.xfail(reason="Not yet implemented.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@property
def supports_json_mode(self) -> bool:
return True
class TestGroqLlama(BaseTestGroq):
@property
def chat_model_params(self) -> dict:
return {
"model": "llama-3.1-8b-instant",
"temperature": 0,
"rate_limiter": rate_limiter,
}
@property
def supports_json_mode(self) -> bool:
return True
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
IFPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
load_numpy,
require_accelerator,
require_hf_hub_version_greater,
require_torch_gpu,
require_transformers_version_greater,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
return self._get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
@require_accelerator
def test_save_load_float16(self):
# Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
self._test_save_load_local()
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,
)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_hf_hub_version_greater("0.26.5")
@require_transformers_version_greater("4.47.1")
def test_save_load_dduf(self):
super().test_save_load_dduf(atol=1e-2, rtol=1e-2)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_if_text_to_image(self):
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.unet.set_attn_processor(AttnAddedKVProcessor())
pipe.enable_model_cpu_offload()
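# Reset CUDA memory statistics so the peak-memory assertion below only measures this run.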
torch.cuda.reset_max_memory_allocated()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt="anime turtle",
num_inference_steps=2,
generator=generator,
output_type="np",
)
image = output.images[0]
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 12 * 10**9
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
)
assert_mean_pixel_difference(image, expected_image)
pipe.remove_all_hooks()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
IFPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
load_numpy,
require_accelerator,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
return self._get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
@require_accelerator
def test_save_load_float16(self):
# Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
self._test_save_load_local()
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,
)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_if_text_to_image(self):
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.unet.set_attn_processor(AttnAddedKVProcessor())
pipe.enable_model_cpu_offload()
torch.cuda.reset_max_memory_allocated()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt="anime turtle",
num_inference_steps=2,
generator=generator,
output_type="np",
)
image = output.images[0]
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 12 * 10**9
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
)
assert_mean_pixel_difference(image, expected_image)
pipe.remove_all_hooks()
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
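# Execute mmdet/version.py and read __version__ from the resulting locals, which
# avoids importing the full mmdet package (and its heavy dependencies) at doc-build time.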
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of the shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of the shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
"""Init file of LlamaIndex."""
__version__ = "0.12.37"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.36"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from itertools import groupby
from typing import Iterable, Dict
from jina import Executor, requests, DocumentArray
class MinRanker(Executor):
"""
:class:`MinRanker` aggregates the score of the matched doc from the matched chunks.
For each matched doc, the score is aggregated from all the matched chunks belonging to that doc.
:param metric: the distance metric used in `scores`
:param default_traversal_paths: default traversal paths on docs, e.g. ['r'] or ['c']
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
metric: str = 'cosine',
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.metric = metric
self.default_traversal_paths = default_traversal_paths
@requests(on='/search')
def rank(self, docs: DocumentArray, parameters: Dict, *args, **kwargs):
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
matches_of_chunks = []
for chunk in doc.chunks:
matches_of_chunks.extend(chunk.matches)
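# Group all chunk-level matches by the id of their parent (root) document;
# `groupby` requires the iterable to be pre-sorted on the same key.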
groups = groupby(
sorted(matches_of_chunks, key=lambda d: d.parent_id),
lambda d: d.parent_id,
)
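# For each parent document keep only its highest-scoring chunk match, re-label the
# match with the parent id, and use that score as the document-level score.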
for key, group in groups:
chunk_match_list = list(group)
chunk_match_list.sort(key=lambda m: -m.scores[self.metric].value)
match = chunk_match_list[0]
match.id = chunk_match_list[0].parent_id
doc.matches.append(match)
doc.matches.sort(key=lambda d: -d.scores[self.metric].value)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from itertools import groupby
from typing import Iterable, Dict
from jina import Executor, requests, DocumentArray
class MinRanker(Executor):
"""
:class:`MinRanker` aggregates the score of the matched doc from the matched chunks.
For each matched doc, the score is aggregated from all the matched chunks belonging to that doc.
:param metric: the distance metric used in `scores`
:param default_traversal_paths: default traversal paths on docs, e.g. ['r'] or ['c']
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
metric: str,
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.metric = metric
self.default_traversal_paths = default_traversal_paths
@requests(on='/search')
def rank(self, docs: DocumentArray, parameters: Dict, *args, **kwargs):
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
matches_of_chunks = []
for chunk in doc.chunks:
matches_of_chunks.extend(chunk.matches)
groups = groupby(
sorted(matches_of_chunks, key=lambda d: d.parent_id),
lambda d: d.parent_id,
)
for key, group in groups:
chunk_match_list = list(group)
chunk_match_list.sort(key=lambda m: -m.scores[self.metric].value)
match = chunk_match_list[0]
match.id = chunk_match_list[0].parent_id
doc.matches.append(match)
doc.matches.sort(key=lambda d: -d.scores[self.metric].value)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scripts/download_full.sh', cwd=Path(__file__).parents[1], check=True
)
yield
shutil.rmtree('.cache')
@pytest.fixture(scope='session')
def build_docker_image() -> str:
img_name = Path(__file__).parents[1].stem.lower()
subprocess.run(['docker', 'build', '-t', img_name, '.'], check=True)
return img_name
@pytest.fixture()
def data_generator():
def _generator():
data_file_path = Path(__file__).parent / 'texts' / 'test_data.txt'
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
chunks = [Document(text='hello world') for _ in range(10)]
return DocumentArray([Document(chunks=chunks)])
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
root = Document()
chunks = [Document() for _ in range(10)]
chunks_2 = [[Document(text='hello world') for _ in range(10)] for _ in range(10)]
root.chunks.extend(chunks)
for i, chunk in enumerate(root.chunks):
chunk.chunks.extend(chunks_2[i])
return DocumentArray([root])
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import shutil
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture(scope="session", autouse=True)
def download_cache():
os.system('scripts/download_full.sh')
yield
shutil.rmtree('.cache')
@pytest.fixture()
def data_generator():
def _generator():
data_file_path = Path(__file__).parent / 'texts' / 'test_data.txt'
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
chunks = [Document(text='hello world') for _ in range(10)]
return DocumentArray([Document(chunks=chunks)])
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
root = Document()
chunks = [Document() for _ in range(10)]
chunks_2 = [[Document(text='hello world') for _ in range(10)] for _ in range(10)]
root.chunks.extend(chunks)
for i, chunk in enumerate(chunks):
chunk.chunks.extend(chunks_2[i])
return DocumentArray([root])
|
from pathlib import Path
import pytest
from torchaudio.datasets import dr_vctk
from torchaudio_unittest.common_utils import (
get_whitenoise,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_SUBSETS = ["train", "test"]
_CONDITIONS = ["clean", "device-recorded"]
_SOURCES = ["DR-VCTK_Office1_ClosedWindow", "DR-VCTK_Office1_OpenedWindow"]
_SPEAKER_IDS = range(226, 230)
_CHANNEL_IDS = range(1, 6)
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = {}
dataset_dir = Path(root_dir) / "DR-VCTK" / "DR-VCTK"
dataset_dir.mkdir(parents=True, exist_ok=True)
config_dir = dataset_dir / "configurations"
config_dir.mkdir(parents=True, exist_ok=True)
sample_rate = 16000
seed = 0
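# Populate each subset with a channel-log configuration file and short white-noise
# wavs for every (speaker, source, channel) combination in both the clean and the
# device-recorded condition folders; every tuple appended below mirrors the items
# yielded by the DR_VCTK dataset.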
for subset in _SUBSETS:
mocked_samples[subset] = []
for condition in _CONDITIONS:
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_dir.mkdir(parents=True, exist_ok=True)
config_filepath = config_dir / f"{subset}_ch_log.txt"
with open(config_filepath, "w") as f:
if subset == "train":
f.write("\n")
f.write("File Name\tMain Source\tChannel Idx\n")
for speaker_id in _SPEAKER_IDS:
utterance_id = 1
for source in _SOURCES:
for channel_id in _CHANNEL_IDS:
filename = f"p{speaker_id}_{utterance_id:03d}.wav"
f.write(f"{filename}\t{source}\t{channel_id}\n")
data = {}
for condition in _CONDITIONS:
data[condition] = get_whitenoise(
sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed
)
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_file_path = audio_dir / filename
save_wav(audio_file_path, data[condition], sample_rate)
seed += 1
sample = (
data[_CONDITIONS[0]],
sample_rate,
data[_CONDITIONS[1]],
sample_rate,
"p" + str(speaker_id),
f"{utterance_id:03d}",
source,
channel_id,
)
mocked_samples[subset].append(sample)
utterance_id += 1
return mocked_samples
class TestDRVCTK(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = {}
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_dr_vctk(self, dataset, subset):
num_samples = 0
for i, (
waveform_clean,
sample_rate_clean,
waveform_dr,
sample_rate_dr,
speaker_id,
utterance_id,
source,
channel_id,
) in enumerate(dataset):
self.assertEqual(waveform_clean, self.samples[subset][i][0], atol=5e-5, rtol=1e-8)
assert sample_rate_clean == self.samples[subset][i][1]
self.assertEqual(waveform_dr, self.samples[subset][i][2], atol=5e-5, rtol=1e-8)
assert sample_rate_dr == self.samples[subset][i][3]
assert speaker_id == self.samples[subset][i][4]
assert utterance_id == self.samples[subset][i][5]
assert source == self.samples[subset][i][6]
assert channel_id == self.samples[subset][i][7]
num_samples += 1
assert num_samples == len(self.samples[subset])
def test_dr_vctk_train_str(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_str(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_train_path(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_path(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_invalid_subset(self):
subset = "invalid"
with pytest.raises(RuntimeError, match=f"The subset '{subset}' does not match any of the supported subsets"):
dr_vctk.DR_VCTK(self.root_dir, subset=subset)
|
from pathlib import Path
import pytest
from torchaudio.datasets import dr_vctk
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
)
_SUBSETS = ["train", "test"]
_CONDITIONS = ["clean", "device-recorded"]
_SOURCES = ["DR-VCTK_Office1_ClosedWindow", "DR-VCTK_Office1_OpenedWindow"]
_SPEAKER_IDS = range(226, 230)
_CHANNEL_IDS = range(1, 6)
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = {}
dataset_dir = Path(root_dir) / "DR-VCTK" / "DR-VCTK"
dataset_dir.mkdir(parents=True, exist_ok=True)
config_dir = dataset_dir / "configurations"
config_dir.mkdir(parents=True, exist_ok=True)
sample_rate = 16000
seed = 0
for subset in _SUBSETS:
mocked_samples[subset] = []
for condition in _CONDITIONS:
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_dir.mkdir(parents=True, exist_ok=True)
config_filepath = config_dir / f"{subset}_ch_log.txt"
with open(config_filepath, "w") as f:
if subset == "train":
f.write("\n")
f.write("File Name\tMain Source\tChannel Idx\n")
for speaker_id in _SPEAKER_IDS:
utterance_id = 1
for source in _SOURCES:
for channel_id in _CHANNEL_IDS:
filename = f"p{speaker_id}_{utterance_id:03d}.wav"
f.write(f"{filename}\t{source}\t{channel_id}\n")
data = {}
for condition in _CONDITIONS:
data[condition] = get_whitenoise(
sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed
)
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_file_path = audio_dir / filename
save_wav(audio_file_path, data[condition], sample_rate)
seed += 1
sample = (
data[_CONDITIONS[0]],
sample_rate,
data[_CONDITIONS[1]],
sample_rate,
"p" + str(speaker_id),
f"{utterance_id:03d}",
source,
channel_id,
)
mocked_samples[subset].append(sample)
utterance_id += 1
return mocked_samples
class TestDRVCTK(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = {}
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_dr_vctk(self, dataset, subset):
num_samples = 0
for i, (
waveform_clean,
sample_rate_clean,
waveform_dr,
sample_rate_dr,
speaker_id,
utterance_id,
source,
channel_id,
) in enumerate(dataset):
self.assertEqual(waveform_clean, self.samples[subset][i][0], atol=5e-5, rtol=1e-8)
assert sample_rate_clean == self.samples[subset][i][1]
self.assertEqual(waveform_dr, self.samples[subset][i][2], atol=5e-5, rtol=1e-8)
assert sample_rate_dr == self.samples[subset][i][3]
assert speaker_id == self.samples[subset][i][4]
assert utterance_id == self.samples[subset][i][5]
assert source == self.samples[subset][i][6]
assert channel_id == self.samples[subset][i][7]
num_samples += 1
assert num_samples == len(self.samples[subset])
def test_dr_vctk_train_str(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_str(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_train_path(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_path(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_invalid_subset(self):
subset = "invalid"
with pytest.raises(RuntimeError, match=f"The subset '{subset}' does not match any of the supported subsets"):
dr_vctk.DR_VCTK(self.root_dir, subset=subset)
|
from typing import Any, Optional, Sequence, Union
from deprecated import deprecated
from llama_index.core.base.llms.generic_utils import (
chat_response_to_completion_response,
stream_chat_response_to_completion_response,
astream_chat_response_to_completion_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
ImageBlock,
)
from llama_index.core.schema import ImageNode
from llama_index.core.base.llms.generic_utils import image_node_to_image_block
from llama_index.llms.azure_openai import AzureOpenAI
@deprecated(
reason="This class is deprecated and will be no longer maintained. Use AzureOpenAI from llama-index-llms-azure-openai instead. See Multi Modal LLMs documentation for a complete guide on migration: https://docs.llamaindex.ai/en/stable/understanding/using_llms/using_llms/#multi-modal-llms",
version="0.4.1",
)
class AzureOpenAIMultiModal(AzureOpenAI):
@classmethod
def class_name(cls) -> str:
return "azure_openai_multi_modal_llm"
def _get_multi_modal_chat_message(
self,
prompt: str,
role: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
image_detail: Optional[str] = "low",
**kwargs: Any,
) -> ChatMessage:
chat_msg = ChatMessage(role=role, content=prompt)
if not image_documents:
# if image_documents is empty, return text only chat message
return chat_msg
for image_document in image_documents:
if isinstance(image_document, ImageNode):
chat_msg.blocks.append(image_node_to_image_block(image_document))
else:
chat_msg.blocks.append(image_document)
return chat_msg
def complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponse:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = self.chat([chat_message], **kwargs)
return chat_response_to_completion_response(chat_response)
def stream_complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponseGen:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = self.stream_chat([chat_message], **kwargs)
return stream_chat_response_to_completion_response(chat_response)
# ===== Async Endpoints =====
async def acomplete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponse:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = await self.achat([chat_message], **kwargs)
return chat_response_to_completion_response(chat_response)
async def astream_complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponseAsyncGen:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = await self.astream_chat([chat_message], **kwargs)
return astream_chat_response_to_completion_response(chat_response)
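# Hypothetical usage sketch (illustrative only; the deployment name, endpoint, key and
# image URL below are placeholders, not real configuration):
if __name__ == "__main__":
    llm = AzureOpenAIMultiModal(
        engine="my-gpt-4o-deployment",
        azure_endpoint="https://example-resource.openai.azure.com/",
        api_key="<your-api-key>",
        api_version="2024-02-01",
    )
    response = llm.complete(
        prompt="Describe this image.",
        image_documents=[ImageBlock(url="https://example.com/image.png")],
    )
    print(response.text)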
|
from typing import Any, Optional, Sequence
from pathlib import Path
from llama_index.core.base.llms.generic_utils import (
chat_response_to_completion_response,
stream_chat_response_to_completion_response,
astream_chat_response_to_completion_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
ImageBlock,
)
from llama_index.core.schema import ImageNode
from llama_index.llms.azure_openai import AzureOpenAI
class AzureOpenAIMultiModal(AzureOpenAI):
@classmethod
def class_name(cls) -> str:
return "azure_openai_multi_modal_llm"
def _get_multi_modal_chat_message(
self,
prompt: str,
role: str,
image_documents: Sequence[ImageNode],
image_detail: Optional[str] = "low",
**kwargs: Any,
) -> ChatMessage:
chat_msg = ChatMessage(role=role, content=prompt)
if not image_documents:
# if image_documents is empty, return text only chat message
return chat_msg
for image_document in image_documents:
# Create the appropriate ContentBlock depending on the document content
if image_document.image:
chat_msg.blocks.append(
ImageBlock(
image=bytes(image_document.image, encoding="utf-8"),
detail=image_detail,
)
)
elif image_document.image_url:
chat_msg.blocks.append(
ImageBlock(url=image_document.image_url, detail=image_detail)
)
elif image_document.image_path:
chat_msg.blocks.append(
ImageBlock(
path=Path(image_document.image_path),
detail=image_detail,
image_mimetype=image_document.image_mimetype
or image_document.metadata.get("file_type"),
)
)
elif f_path := image_document.metadata.get("file_path"):
chat_msg.blocks.append(
ImageBlock(
path=Path(f_path),
detail=image_detail,
image_mimetype=image_document.metadata.get("file_type"),
)
)
return chat_msg
def complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponse:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = self.chat([chat_message], **kwargs)
return chat_response_to_completion_response(chat_response)
def stream_complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponseGen:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = self.stream_chat([chat_message], **kwargs)
return stream_chat_response_to_completion_response(chat_response)
# ===== Async Endpoints =====
async def acomplete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponse:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = await self.achat([chat_message], **kwargs)
return chat_response_to_completion_response(chat_response)
async def astream_complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponseAsyncGen:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = await self.astream_chat([chat_message], **kwargs)
return astream_chat_response_to_completion_response(chat_response)
|
"""LLMResult class."""
from __future__ import annotations
from copy import deepcopy
from typing import Literal, Optional, Union
from pydantic import BaseModel
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.run_info import RunInfo
class LLMResult(BaseModel):
"""A container for results of an LLM call.
Both chat models and LLMs generate an LLMResult object. This object contains
the generated outputs and any additional information that the model provider
wants to return.
"""
generations: list[
list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
]
"""Generated outputs.
The first dimension of the list represents completions for different input
prompts.
The second dimension of the list represents different candidate generations
for a given prompt.
When returned from an LLM the type is List[List[Generation]].
When returned from a chat model the type is List[List[ChatGeneration]].
ChatGeneration is a subclass of Generation that has a field for a structured
chat message.
"""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output.
This dictionary is a free-form dictionary that can contain any information that the
provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.
"""
run: Optional[list[RunInfo]] = None
"""List of metadata info for model call for each input."""
type: Literal["LLMResult"] = "LLMResult" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def flatten(self) -> list[LLMResult]:
"""Flatten generations into a single list.
Unpack List[List[Generation]] -> List[LLMResult] where each returned LLMResult
contains only a single Generation. If token usage information is available,
it is kept only for the LLMResult corresponding to the top-choice
Generation, to avoid over-counting of token usage downstream.
Returns:
List of LLMResults where each returned LLMResult contains a single
Generation.
"""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=self.llm_output,
)
)
else:
if self.llm_output is not None:
llm_output = deepcopy(self.llm_output)
llm_output["token_usage"] = {}
else:
llm_output = None
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=llm_output,
)
)
return llm_results
def __eq__(self, other: object) -> bool:
"""Check for LLMResult equality by ignoring any metadata related to runs."""
if not isinstance(other, LLMResult):
return NotImplemented
return (
self.generations == other.generations
and self.llm_output == other.llm_output
)
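# Minimal usage sketch (illustrative only): flatten() keeps token usage only on the first
# flattened result, so downstream token accounting is not double-counted.
if __name__ == "__main__":
    result = LLMResult(
        generations=[
            [Generation(text="answer to the first prompt")],
            [Generation(text="answer to the second prompt")],
        ],
        llm_output={"token_usage": {"total_tokens": 7}},
    )
    first, second = result.flatten()
    print(first.llm_output)   # {'token_usage': {'total_tokens': 7}}
    print(second.llm_output)  # {'token_usage': {}}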
|
from __future__ import annotations
from copy import deepcopy
from typing import Literal, Optional, Union
from pydantic import BaseModel
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.run_info import RunInfo
class LLMResult(BaseModel):
"""A container for results of an LLM call.
Both chat models and LLMs generate an LLMResult object. This object contains
the generated outputs and any additional information that the model provider
wants to return.
"""
generations: list[
list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
]
"""Generated outputs.
The first dimension of the list represents completions for different input
prompts.
The second dimension of the list represents different candidate generations
for a given prompt.
When returned from an LLM the type is List[List[Generation]].
When returned from a chat model the type is List[List[ChatGeneration]].
ChatGeneration is a subclass of Generation that has a field for a structured
chat message.
"""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output.
This dictionary is a free-form dictionary that can contain any information that the
provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.
"""
run: Optional[list[RunInfo]] = None
"""List of metadata info for model call for each input."""
type: Literal["LLMResult"] = "LLMResult" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def flatten(self) -> list[LLMResult]:
"""Flatten generations into a single list.
Unpack List[List[Generation]] -> List[LLMResult] where each returned LLMResult
contains only a single Generation. If token usage information is available,
it is kept only for the LLMResult corresponding to the top-choice
Generation, to avoid over-counting of token usage downstream.
Returns:
List of LLMResults where each returned LLMResult contains a single
Generation.
"""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=self.llm_output,
)
)
else:
if self.llm_output is not None:
llm_output = deepcopy(self.llm_output)
llm_output["token_usage"] = {}
else:
llm_output = None
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=llm_output,
)
)
return llm_results
def __eq__(self, other: object) -> bool:
"""Check for LLMResult equality by ignoring any metadata related to runs."""
if not isinstance(other, LLMResult):
return NotImplemented
return (
self.generations == other.generations
and self.llm_output == other.llm_output
)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.ZeroPadding1D")
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Example:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> x
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = keras.layers.ZeroPadding1D(padding=2)(x)
>>> y
[[[ 0 0 0]
[ 0 0 0]
[ 0 1 2]
[ 3 4 5]
[ 0 0 0]
[ 0 0 0]]
[[ 0 0 0]
[ 0 0 0]
[ 6 7 8]
[ 9 10 11]
[ 0 0 0]
[ 0 0 0]]]
Args:
padding: Int, or tuple of int (length 2), or dictionary.
- If int: how many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of 2 ints: how many zeros to add at the beginning and the
end of the padding dimension (`(left_pad, right_pad)`).
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, axis_to_pad, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch_size, channels, axis_to_pad)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists). Defaults to
`"channels_last"`.
Input shape:
3D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, axis_to_pad, features)`
- If `data_format` is `"channels_first"`:
`(batch_size, features, axis_to_pad)`
Output shape:
3D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, padded_axis, features)`
- If `data_format` is `"channels_first"`:
`(batch_size, features, padded_axis)`
"""
def __init__(self, padding=1, data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.padding = argument_validation.standardize_tuple(
padding, 2, "padding", allow_zero=True
)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
padding_dim = 2 if self.data_format == "channels_first" else 1
if output_shape[padding_dim] is not None:
output_shape[padding_dim] += self.padding[0] + self.padding[1]
return tuple(output_shape)
def call(self, inputs):
if self.data_format == "channels_first":
all_dims_padding = ((0, 0), (0, 0), self.padding)
else:
all_dims_padding = ((0, 0), self.padding, (0, 0))
return ops.pad(inputs, all_dims_padding)
def get_config(self):
config = {"padding": self.padding, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
@keras_export("keras.layers.ZeroPadding1D")
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Example:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> x
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = keras.layers.ZeroPadding1D(padding=2)(x)
>>> y
[[[ 0 0 0]
[ 0 0 0]
[ 0 1 2]
[ 3 4 5]
[ 0 0 0]
[ 0 0 0]]
[[ 0 0 0]
[ 0 0 0]
[ 6 7 8]
[ 9 10 11]
[ 0 0 0]
[ 0 0 0]]]
Args:
padding: Int, or tuple of int (length 2), or dictionary.
- If int: how many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of 2 ints: how many zeros to add at the beginning and the
end of the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch_size, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch_size, padded_axis, features)`
"""
def __init__(self, padding=1, **kwargs):
super().__init__(**kwargs)
self.padding = argument_validation.standardize_tuple(
padding, 2, "padding", allow_zero=True
)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
if output_shape[1] is not None:
output_shape[1] += self.padding[0] + self.padding[1]
return tuple(output_shape)
def call(self, inputs):
all_dims_padding = ((0, 0), self.padding, (0, 0))
return ops.pad(inputs, all_dims_padding)
def get_config(self):
config = {"padding": self.padding}
base_config = super().get_config()
return {**base_config, **config}
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: int = 10_000
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
schema = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
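# Illustrative sketch (not part of the builder): batched, column-projected reading with
# pyarrow directly, mirroring what _generate_tables does per file. "example.parquet" and the
# column names are hypothetical placeholders.
if __name__ == "__main__":
    parquet_file = pq.ParquetFile("example.parquet")
    for record_batch in parquet_file.iter_batches(batch_size=10_000, columns=["id", "text"]):
        pa_table = pa.Table.from_batches([record_batch])
        print(pa_table.num_rows, pa_table.column_names)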
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: int = 10_000
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
schema = self.config.features.arrow_schema if self.config.features is not None else None
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.config.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing import ImageUrl, Tensor
def test_image_url():
uri = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
|
from pydantic.tools import parse_obj_as
from docarray.typing import ImageUrl, Tensor
def test_image_url():
uri = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
tensor = uri.load()
assert isinstance(tensor, Tensor)
|
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
for s in ('flow', 'gateway', 'executor', 'deployment'):
a = _cli_to_schema(api_to_dict(), s)
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in a[f'Jina::{s.capitalize()}']['properties'].items():
desc = v["description"].replace("\n", "<br>")
if k in ('port', 'port_monitoring'):
v[
'default'
] = 'random in [49152, 65535]'  # avoid random numbers, which would make devbot commit forever
type = None if v['type'] == 'null' else v['type']
table.append(f'| `{k}` | {desc} | `{type}` | `{v["default"]}` |')
with open(f'../docs/concepts/flow/{s}-args.md', 'w', encoding='utf-8') as fp:
fp.write('\n'.join(table))
|
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
for s in ('flow', 'gateway', 'executor', 'deployment'):
a = _cli_to_schema(api_to_dict(), s)
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in a[f'Jina::{s.capitalize()}']['properties'].items():
desc = v["description"].replace("\n", "<br>")
if k in ('port', 'port_monitoring'):
v[
'default'
] = 'random in [49152, 65535]'  # avoid random numbers, which would make devbot commit forever
type = None if v['type'] == 'null' else v['type']
table.append(f'| `{k}` | {desc} | `{type}` | `{v["default"]}` |')
with open(f'../docs/concepts/flow/{s}-args.md', 'w') as fp:
fp.write('\n'.join(table))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import GoogleDocumentAIWarehouseRetriever
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleDocumentAIWarehouseRetriever": "langchain_community.retrievers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleDocumentAIWarehouseRetriever",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import GoogleDocumentAIWarehouseRetriever
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleDocumentAIWarehouseRetriever": "langchain_community.retrievers"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleDocumentAIWarehouseRetriever",
]
|
# mypy: ignore-errors
import argparse
import torchgen.model as model
from torchgen.gen import FileManager, parse_native_yaml
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split("\n")
min_leading_spaces = min(map(num_leading_spaces, lines))
lines = [line[min_leading_spaces:] for line in lines]
return "\n".join(lines)
def gen_external(native_functions_path, tags_path, external_path):
native_functions = parse_native_yaml(native_functions_path, tags_path)
func_decls = []
func_registrations = []
for func in native_functions:
schema = func.func
name = schema.name.name.base
args = schema.arguments
# Only supports extern calls for functions with out variants
if not schema.is_out_fn():
continue
# Doesn't currently support functions with more than one out parameter
if len(args.out) > 1:
continue
# Doesn't currently support kwarg arguments
if (
len(args.pre_tensor_options_kwarg_only) > 0
or len(args.post_tensor_options_kwarg_only) > 0
):
continue
self_arg = [args.self_arg.argument] if args.self_arg is not None else []
args = (
list(args.pre_self_positional) + self_arg + list(args.post_self_positional)
)
tensor_args = [
arg
for arg in args
if isinstance(arg.type, model.BaseType)
and arg.type.name == model.BaseTy.Tensor
]
if len(tensor_args) != len(args):
continue
arg_names = [None] * len(args)
tensor_decls = []
for idx, arg in enumerate(tensor_args):
s = f"const at::Tensor& {arg.name} = tensors[{idx + 1}];"
tensor_decls.append(s)
arg_names[idx] = arg.name
nl = "\n"
# print(tensor_decls, name, arg_names)
func_decl = f"""\
void nnc_aten_{name}(
int64_t bufs_num,
void** buf_data,
int64_t* buf_ranks,
int64_t* buf_dims,
int64_t* buf_strides,
int8_t* buf_dtypes,
int64_t args_num,
int64_t* extra_args) {{
std::vector<at::Tensor> tensors =
constructTensors(bufs_num, buf_data, buf_ranks, buf_dims, buf_strides, buf_dtypes);
at::Tensor& r = tensors[0];
{nl.join(tensor_decls)}
try {{
at::{name}_out({", ".join(["r"] + arg_names)});
}} catch (...) {{
}}
}}"""
func_registration = f"""\
const static RegisterNNCExternalFunction nnc_{name}(
"nnc_aten_{name}",
nnc_aten_{name});"""
func_decls.append(func_decl)
func_registrations.append(func_registration)
fm = FileManager(install_dir=".", template_dir=".", dry_run=False)
fm.write_with_template(
"external_functions_codegen.cpp",
external_path,
lambda: {
"external_registrations": func_registrations,
"external_functions": func_decls,
},
)
def main() -> None:
parser = argparse.ArgumentParser(description="Generate annotated_fn_args script")
parser.add_argument(
"--native-functions",
"--native_functions",
help="path to native_functions.yaml",
default="../../../../aten/src/ATen/native/native_functions.yaml",
)
parser.add_argument(
"--tags",
help="path to tags.yaml",
default="../../../../aten/src/ATen/native/tags.yaml",
)
parser.add_argument(
"--template-path",
"--template_path",
help="path to external_functions_codegen_template.cpp",
default="../../../../tools/jit/templates/external_functions_codegen_template.cpp",
)
args = parser.parse_args()
gen_external(args.native_functions, args.tags, args.template_path)
if __name__ == "__main__":
main()
|
# mypy: ignore-errors
import argparse
import torchgen.model as model
from torchgen.gen import FileManager, parse_native_yaml
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split("\n")
min_leading_spaces = min(map(num_leading_spaces, lines))
lines = [line[min_leading_spaces:] for line in lines]
return "\n".join(lines)
def gen_external(native_functions_path, tags_path, external_path):
native_functions = parse_native_yaml(native_functions_path, tags_path)
func_decls = []
func_registrations = []
for func in native_functions:
schema = func.func
name = schema.name.name.base
args = schema.arguments
# Only supports extern calls for functions with out variants
if not schema.is_out_fn():
continue
# Doesn't currently support functions with more than one out parameter
if len(args.out) > 1:
continue
# Doesn't currently support kwarg arguments
if (
len(args.pre_tensor_options_kwarg_only) > 0
or len(args.post_tensor_options_kwarg_only) > 0
):
continue
self_arg = [args.self_arg.argument] if args.self_arg is not None else []
args = (
list(args.pre_self_positional) + self_arg + list(args.post_self_positional)
)
tensor_args = [
arg
for arg in args
if isinstance(arg.type, model.BaseType)
and arg.type.name == model.BaseTy.Tensor
]
if len(tensor_args) != len(args):
continue
arg_names = [None] * len(args)
tensor_decls = []
for idx, arg in enumerate(tensor_args):
s = f"const at::Tensor& {arg.name} = tensors[{idx + 1}];"
tensor_decls.append(s)
arg_names[idx] = arg.name
nl = "\n"
# print(tensor_decls, name, arg_names)
func_decl = f"""\
void nnc_aten_{name}(
int64_t bufs_num,
void** buf_data,
int64_t* buf_ranks,
int64_t* buf_dims,
int64_t* buf_strides,
int8_t* buf_dtypes,
int64_t args_num,
int64_t* extra_args) {{
std::vector<at::Tensor> tensors =
constructTensors(bufs_num, buf_data, buf_ranks, buf_dims, buf_strides, buf_dtypes);
at::Tensor& r = tensors[0];
{nl.join(tensor_decls)}
try {{
at::{name}_out({', '.join(['r'] + arg_names)});
}} catch (...) {{
}}
}}"""
func_registration = f"""\
const static RegisterNNCExternalFunction nnc_{name}(
"nnc_aten_{name}",
nnc_aten_{name});"""
func_decls.append(func_decl)
func_registrations.append(func_registration)
fm = FileManager(install_dir=".", template_dir=".", dry_run=False)
fm.write_with_template(
"external_functions_codegen.cpp",
external_path,
lambda: {
"external_registrations": func_registrations,
"external_functions": func_decls,
},
)
def main() -> None:
parser = argparse.ArgumentParser(description="Generate annotated_fn_args script")
parser.add_argument(
"--native-functions",
"--native_functions",
help="path to native_functions.yaml",
default="../../../../aten/src/ATen/native/native_functions.yaml",
)
parser.add_argument(
"--tags",
help="path to tags.yaml",
default="../../../../aten/src/ATen/native/tags.yaml",
)
parser.add_argument(
"--template-path",
"--template_path",
help="path to external_functions_codegen_template.cpp",
default="../../../../tools/jit/templates/external_functions_codegen_template.cpp",
)
args = parser.parse_args()
gen_external(args.native_functions, args.tags, args.template_path)
if __name__ == "__main__":
main()
|
import pytest
from llama_index.core.base.embeddings.base_sparse import BaseSparseEmbedding
from llama_index.sparse_embeddings.fastembed import FastEmbedSparseEmbedding
def test_class():
names_of_base_classes = [b.__name__ for b in FastEmbedSparseEmbedding.__mro__]
assert BaseSparseEmbedding.__name__ in names_of_base_classes
def test_e2e():
embed_model = FastEmbedSparseEmbedding(model_name="Qdrant/bm25")
texts = ["hello", "world"]
embeddings = embed_model.get_text_embedding_batch(texts)
assert len(embeddings) == len(texts)
queries = ["foo"]
embedding = embed_model.get_query_embedding(queries[0])
assert len(embedding) == 1
@pytest.mark.asyncio
async def test_e2e_async():
embed_model = FastEmbedSparseEmbedding(model_name="Qdrant/bm25")
texts = ["hello", "world"]
embeddings = await embed_model.aget_text_embedding_batch(texts)
assert len(embeddings) == len(texts)
queries = ["foo"]
embedding = await embed_model.aget_query_embedding(queries[0])
assert len(embedding) == 1
|
import pytest
from llama_index.core.base.embeddings.base_sparse import BaseSparseEmbedding
from llama_index.sparse_embeddings.fastembed import FastEmbedSparseEmbedding
def test_class():
names_of_base_classes = [b.__name__ for b in FastEmbedSparseEmbedding.__mro__]
assert BaseSparseEmbedding.__name__ in names_of_base_classes
def test_e2e():
embed_model = FastEmbedSparseEmbedding(model_name="Qdrant/bm25")
texts = ["hello", "world"]
embeddings = embed_model.get_text_embedding_batch(texts)
assert len(embeddings) == len(texts)
queries = ["foo"]
embedding = embed_model.get_query_embedding(queries[0])
assert len(embedding) == 1
@pytest.mark.asyncio()
async def test_e2e_async():
embed_model = FastEmbedSparseEmbedding(model_name="Qdrant/bm25")
texts = ["hello", "world"]
embeddings = await embed_model.aget_text_embedding_batch(texts)
assert len(embeddings) == len(texts)
queries = ["foo"]
embedding = await embed_model.aget_query_embedding(queries[0])
assert len(embedding) == 1
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero (active) elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero (active) elements in the embeddings.
If specified, only embeddings with more than this number of non-zero (active) elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_samples = [{
'img_path': img_path1,
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'img_path': img_path2,
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
self.assertTrue(not osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
|
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
# test with format_only=True, keep_results=False
with self.assertRaises(AssertionError):
CityScapesMetric(
outfile_prefix=self.tmp_dir.name + 'test',
format_only=True,
keep_results=False)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_samples = [{
'img_path': img_path1,
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'img_path': img_path2,
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
keep_results=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
        self.assertTrue(not osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
keep_results=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
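# A minimal sketch (kept as a comment so it is not executed on import) of how
# the deprecated-module aliasing above behaves; it only assumes the `datasets`
# package itself is importable:
#
#   import datasets
#   from datasets import concatenate_datasets
#   # the deprecated module paths still expose the same objects
#   assert datasets.arrow_dataset.concatenate_datasets is concatenate_datasets
#   assert datasets.utils.DownloadConfig is datasets.DownloadConfig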
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_boxes=BoundingBoxes(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
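# A minimal usage sketch (kept as a comment; it assumes the archives can be
# downloaded to `root` and that scipy is installed for the .mat annotations):
#
#   dataset = StanfordCars("~/datasets/stanford-cars", split="train")
#   sample = next(iter(dataset))
#   # each sample is a dict with "path", "image", "label" and "bounding_boxes"
#   print(sample["label"], sample["bounding_boxes"])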
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBoxes
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_boxes=BoundingBoxes(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example documents
corpus = [
"Machine learning is a field of study that gives computers the ability to learn without being explicitly programmed.",
"Deep learning is part of a broader family of machine learning methods based on artificial neural networks with representation learning.",
"Neural networks are computing systems vaguely inspired by the biological neural networks that constitute animal brains.",
"Mars rovers are robotic vehicles designed to travel on the surface of Mars to collect data and perform experiments.",
"The James Webb Space Telescope is the largest optical telescope in space, designed to conduct infrared astronomy.",
"SpaceX's Starship is designed to be a fully reusable transportation system capable of carrying humans to Mars and beyond.",
"Global warming is the long-term heating of Earth's climate system observed since the pre-industrial period due to human activities.",
"Renewable energy sources include solar, wind, hydro, and geothermal power that naturally replenish over time.",
"Carbon capture technologies aim to collect CO2 emissions before they enter the atmosphere and store them underground.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode_document(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"How do artificial neural networks work?",
"What technology is used for modern space exploration?",
"How can we address climate change challenges?",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode_query(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(f"(Score: {score:.4f})", corpus[idx])
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], f"(Score: {score:.4f})")
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.registry import RUNNERS
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--show', action='store_true', help='show prediction results')
parser.add_argument(
'--show-dir',
help='directory where painted images will be saved. '
'If specified, it will be automatically saved '
'to the work_dir/timestamp/show_dir')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def trigger_visualization_hook(cfg, args):
default_hooks = cfg.default_hooks
if 'visualization' in default_hooks:
visualization_hook = default_hooks['visualization']
# Turn on visualization
visualization_hook['draw'] = True
if args.show:
visualization_hook['show'] = True
visualization_hook['wait_time'] = args.wait_time
if args.show_dir:
visualization_hook['test_out_dir'] = args.show_dir
else:
raise RuntimeError(
            'VisualizationHook must be included in default_hooks. '
            'Refer to the usage of '
            '"visualization=dict(type=\'VisualizationHook\')"')
return cfg
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start testing
runner.test()
if __name__ == '__main__':
main()
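# Example invocation (config and checkpoint paths below are placeholders):
#   python tools/test.py configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py \
#       checkpoints/faster_rcnn_r50_fpn_1x_coco.pth --show-dir results/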
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.registry import RUNNERS
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--show', action='store_true', help='show prediction results')
parser.add_argument(
'--show-dir',
help='directory where painted images will be saved. '
'If specified, it will be automatically saved '
'to the work_dir/timestamp/show_dir')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def trigger_visualization_hook(cfg, args):
default_hooks = cfg.default_hooks
if 'visualization' in default_hooks:
visualization_hook = default_hooks['visualization']
# Turn on visualization
visualization_hook['draw'] = True
if args.show:
visualization_hook['show'] = True
visualization_hook['wait_time'] = args.wait_time
if args.show_dir:
visualization_hook['test_out_dir'] = args.show_dir
else:
raise RuntimeError(
            'VisualizationHook must be included in default_hooks. '
            'Refer to the usage of '
            '"visualization=dict(type=\'VisualizationHook\')"')
return cfg
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start testing
runner.test()
if __name__ == '__main__':
main()
|
_base_ = './faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './faster_rcnn_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
import fnmatch
import os
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileSearchInput(BaseModel):
"""Input for FileSearchTool."""
dir_path: str = Field(
default=".",
description="Subdirectory to search in.",
)
pattern: str = Field(
...,
description="Unix shell regex, where * matches everything.",
)
class FileSearchTool(BaseFileToolMixin, BaseTool):
"""Tool that searches for files in a subdirectory that match a regex pattern."""
name: str = "file_search"
args_schema: Type[BaseModel] = FileSearchInput
description: str = (
"Recursively search for files in a subdirectory that match the regex pattern"
)
def _run(
self,
pattern: str,
dir_path: str = ".",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
matches = []
try:
for root, _, filenames in os.walk(dir_path_):
for filename in fnmatch.filter(filenames, pattern):
absolute_path = os.path.join(root, filename)
relative_path = os.path.relpath(absolute_path, dir_path_)
matches.append(relative_path)
if matches:
return "\n".join(matches)
else:
return f"No files found for pattern {pattern} in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
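# A minimal usage sketch (kept as a comment; `root_dir` comes from
# BaseFileToolMixin and confines the search to that directory, and the path
# below is a placeholder):
#
#   tool = FileSearchTool(root_dir="/tmp/workspace")
#   print(tool.run({"dir_path": ".", "pattern": "*.py"}))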
|
import fnmatch
import os
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileSearchInput(BaseModel):
"""Input for FileSearchTool."""
dir_path: str = Field(
default=".",
description="Subdirectory to search in.",
)
pattern: str = Field(
...,
description="Unix shell regex, where * matches everything.",
)
class FileSearchTool(BaseFileToolMixin, BaseTool): # type: ignore[override, override]
"""Tool that searches for files in a subdirectory that match a regex pattern."""
name: str = "file_search"
args_schema: Type[BaseModel] = FileSearchInput
description: str = (
"Recursively search for files in a subdirectory that match the regex pattern"
)
def _run(
self,
pattern: str,
dir_path: str = ".",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
matches = []
try:
for root, _, filenames in os.walk(dir_path_):
for filename in fnmatch.filter(filenames, pattern):
absolute_path = os.path.join(root, filename)
relative_path = os.path.relpath(absolute_path, dir_path_)
matches.append(relative_path)
if matches:
return "\n".join(matches)
else:
return f"No files found for pattern {pattern} in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
|
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
_FewShotPromptTemplateMixin,
)
__all__ = [
"FewShotChatMessagePromptTemplate",
"FewShotPromptTemplate",
"_FewShotPromptTemplateMixin",
]
|
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
_FewShotPromptTemplateMixin,
)
__all__ = [
"FewShotPromptTemplate",
"FewShotChatMessagePromptTemplate",
"_FewShotPromptTemplateMixin",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset',
'get_loading_pipeline', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset',
'get_loading_pipeline', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler'
]
|
__version__ = "2.8.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .trainer import SentenceTransformerTrainer
from .training_args import SentenceTransformerTrainingArguments
from .model_card import SentenceTransformerModelCardData
from .quantization import quantize_embeddings
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
__version__ = "2.8.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .quantization import quantize_embeddings
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
"quantize_embeddings",
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# build test pipeline
model.cfg.test_dataloader.dataset.pipeline[
0].type = 'mmdet.LoadImageFromNDArray'
test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in track_iter_progress((video_reader, len(video_reader))):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
image=frame,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
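# Example invocation (the config and checkpoint paths below are placeholders):
#   python demo/video_demo.py demo/demo.mp4 \
#       configs/rtmdet/rtmdet_tiny_8xb32-300e_coco.py rtmdet_tiny.pth \
#       --out result.mp4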
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# build test pipeline
model.cfg.test_dataloader.dataset.pipeline[
0].type = 'mmdet.LoadImageFromNDArray'
test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in track_iter_progress(video_reader):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
image=frame,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
"""Util that calls Bing Search."""
from typing import Any, Dict, List
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, Field, model_validator
# BING_SEARCH_ENDPOINT is the default endpoint for Bing Web Search API.
# Currently there are two web-based Bing Search services available on Azure,
# i.e. Bing Web Search[1] and Bing Custom Search[2]. Both services provide a
# wide range of search results, but Bing Custom Search additionally requires
# you to provide a custom search instance, `customConfig`.
# Both services are available for BingSearchAPIWrapper.
# History of Azure Bing Search API:
# Before shown in Azure Marketplace as a separate service, Bing Search APIs were
# part of Azure Cognitive Services, the endpoint of which is unique, and the user
# must specify the endpoint when making a request. After transitioning to Azure
# Marketplace, the endpoint is standardized and the user does not need to specify
# the endpoint[3].
# Reference:
# 1. https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/overview
# 2. https://learn.microsoft.com/en-us/bing/search-apis/bing-custom-search/overview
# 3. https://azure.microsoft.com/en-in/updates/bing-search-apis-will-transition-from-azure-cognitive-services-to-azure-marketplace-on-31-october-2023/
DEFAULT_BING_SEARCH_ENDPOINT = "https://api.bing.microsoft.com/v7.0/search"
class BingSearchAPIWrapper(BaseModel):
"""Wrapper for Bing Web Search API."""
bing_subscription_key: str
bing_search_url: str
k: int = 10
search_kwargs: dict = Field(default_factory=dict)
"""Additional keyword arguments to pass to the search request."""
model_config = ConfigDict(
extra="forbid",
)
def _bing_search_results(self, search_term: str, count: int) -> List[dict]:
headers = {"Ocp-Apim-Subscription-Key": self.bing_subscription_key}
params = {
"q": search_term,
"count": count,
"textDecorations": True,
"textFormat": "HTML",
**self.search_kwargs,
}
response = requests.get(
self.bing_search_url,
headers=headers,
params=params,
)
response.raise_for_status()
search_results = response.json()
if "webPages" in search_results:
return search_results["webPages"]["value"]
return []
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and endpoint exists in environment."""
bing_subscription_key = get_from_dict_or_env(
values, "bing_subscription_key", "BING_SUBSCRIPTION_KEY"
)
values["bing_subscription_key"] = bing_subscription_key
bing_search_url = get_from_dict_or_env(
values,
"bing_search_url",
"BING_SEARCH_URL",
default=DEFAULT_BING_SEARCH_ENDPOINT,
)
values["bing_search_url"] = bing_search_url
return values
def run(self, query: str) -> str:
"""Run query through BingSearch and parse result."""
snippets = []
results = self._bing_search_results(query, count=self.k)
if len(results) == 0:
return "No good Bing Search Result was found"
for result in results:
snippets.append(result["snippet"])
return " ".join(snippets)
def results(self, query: str, num_results: int) -> List[Dict]:
"""Run query through BingSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._bing_search_results(query, count=num_results)
if len(results) == 0:
return [{"Result": "No good Bing Search Result was found"}]
for result in results:
metadata_result = {
"snippet": result["snippet"],
"title": result["name"],
"link": result["url"],
}
metadata_results.append(metadata_result)
return metadata_results
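# A minimal usage sketch (kept as a comment; it requires a valid Bing
# subscription key, shown here as a placeholder, or the BING_SUBSCRIPTION_KEY /
# BING_SEARCH_URL environment variables):
#
#   wrapper = BingSearchAPIWrapper(
#       bing_subscription_key="<your-key>",
#       bing_search_url=DEFAULT_BING_SEARCH_ENDPOINT,
#       k=3,
#   )
#   print(wrapper.run("python web scraping"))
#   print(wrapper.results("python web scraping", num_results=3))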
|
"""Util that calls Bing Search."""
from typing import Any, Dict, List
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, Field, model_validator
# BING_SEARCH_ENDPOINT is the default endpoint for Bing Web Search API.
# Currently there are two web-based Bing Search services available on Azure,
# i.e. Bing Web Search[1] and Bing Custom Search[2]. Both services provide a
# wide range of search results, but Bing Custom Search additionally requires
# you to provide a custom search instance, `customConfig`.
# Both services are available for BingSearchAPIWrapper.
# History of Azure Bing Search API:
# Before shown in Azure Marketplace as a separate service, Bing Search APIs were
# part of Azure Cognitive Services, the endpoint of which is unique, and the user
# must specify the endpoint when making a request. After transitioning to Azure
# Marketplace, the endpoint is standardized and the user does not need to specify
# the endpoint[3].
# Reference:
# 1. https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/overview
# 2. https://learn.microsoft.com/en-us/bing/search-apis/bing-custom-search/overview
# 3. https://azure.microsoft.com/en-in/updates/bing-search-apis-will-transition-from-azure-cognitive-services-to-azure-marketplace-on-31-october-2023/
DEFAULT_BING_SEARCH_ENDPOINT = "https://api.bing.microsoft.com/v7.0/search"
class BingSearchAPIWrapper(BaseModel):
"""Wrapper for Bing Web Search API."""
bing_subscription_key: str
bing_search_url: str
k: int = 10
search_kwargs: dict = Field(default_factory=dict)
"""Additional keyword arguments to pass to the search request."""
model_config = ConfigDict(
extra="forbid",
)
def _bing_search_results(self, search_term: str, count: int) -> List[dict]:
headers = {"Ocp-Apim-Subscription-Key": self.bing_subscription_key}
params = {
"q": search_term,
"count": count,
"textDecorations": True,
"textFormat": "HTML",
**self.search_kwargs,
}
response = requests.get(
self.bing_search_url,
headers=headers,
params=params, # type: ignore
)
response.raise_for_status()
search_results = response.json()
if "webPages" in search_results:
return search_results["webPages"]["value"]
return []
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and endpoint exists in environment."""
bing_subscription_key = get_from_dict_or_env(
values, "bing_subscription_key", "BING_SUBSCRIPTION_KEY"
)
values["bing_subscription_key"] = bing_subscription_key
bing_search_url = get_from_dict_or_env(
values,
"bing_search_url",
"BING_SEARCH_URL",
default=DEFAULT_BING_SEARCH_ENDPOINT,
)
values["bing_search_url"] = bing_search_url
return values
def run(self, query: str) -> str:
"""Run query through BingSearch and parse result."""
snippets = []
results = self._bing_search_results(query, count=self.k)
if len(results) == 0:
return "No good Bing Search Result was found"
for result in results:
snippets.append(result["snippet"])
return " ".join(snippets)
def results(self, query: str, num_results: int) -> List[Dict]:
"""Run query through BingSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._bing_search_results(query, count=num_results)
if len(results) == 0:
return [{"Result": "No good Bing Search Result was found"}]
for result in results:
metadata_result = {
"snippet": result["snippet"],
"title": result["name"],
"link": result["url"],
}
metadata_results.append(metadata_result)
return metadata_results
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
import base64
import os
import pytest
import requests
from llama_index.core.llms import LLM
from llama_index.core.schema import ImageNode
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in GeminiMultiModal.__mro__]
assert LLM.__name__ in names_of_base_classes
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
@pytest.mark.asyncio()
async def test_streaming_async():
response = requests.get(
"https://storage.googleapis.com/generativeai-downloads/data/scene.jpg",
headers={"User-agent": "Mozilla/5.0"},
)
image_str = base64.b64encode(response.content).decode("UTF-8")
node = ImageNode(image=image_str)
m = GeminiMultiModal()
streaming_handler = await m.astream_complete(
"Tell me what's in this image",
image_documents=[node],
)
async for chunk in streaming_handler:
assert chunk.delta
@pytest.mark.skipif(
os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_streaming():
response = requests.get(
"https://storage.googleapis.com/generativeai-downloads/data/scene.jpg",
headers={"User-agent": "Mozilla/5.0"},
)
image_str = base64.b64encode(response.content).decode("UTF-8")
node = ImageNode(image=image_str)
m = GeminiMultiModal()
streaming_handler = m.stream_complete(
"Tell me what's in this image",
image_documents=[node],
)
for chunk in streaming_handler:
assert chunk.delta
|
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in GeminiMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import setup_cache_size_limit_of_dynamo
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
        help='If a checkpoint path is specified, resume from it; otherwise, '
        'try to automatically resume from the latest checkpoint '
        'in the work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# Reduce the number of repeated compilations and improve
# training speed.
setup_cache_size_limit_of_dynamo()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
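# Hedged CLI usage sketch (not part of the original script); the config path below
# is a placeholder and must point to an existing mmdet config file:
#   python train.py configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py \
#       --work-dir ./work_dirs/frcnn --amp --auto-scale-lr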
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import setup_cache_size_limit_of_dynamo
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
        help='If a checkpoint path is specified, resume from it; otherwise, '
        'try to automatically resume from the latest checkpoint '
        'in the work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# Reduce the number of repeated compilations and improve
# training speed.
setup_cache_size_limit_of_dynamo()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any
from docarray.base_doc.io.json import orjson_dumps
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from fastapi.responses import JSONResponse
else:
fastapi = import_library('fastapi', raise_error=True)
JSONResponse = fastapi.responses.JSONResponse
class DocArrayResponse(JSONResponse):
"""
This is a custom Response class for FastAPI and starlette. This is needed
to handle serialization of the Document types when using FastAPI
---
```python
    from docarray.documents import Text
from docarray.base_doc import DocResponse
@app.post("/doc/", response_model=Text, response_class=DocResponse)
async def create_item(doc: Text) -> Text:
return doc
```
---
"""
def render(self, content: Any) -> bytes:
return orjson_dumps(content)
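# Hedged usage sketch (illustrative, not part of the original module): a minimal
# FastAPI app that serializes a docarray document through DocArrayResponse. The
# `TextDoc` import path is an assumption and may differ across docarray versions.
if __name__ == '__main__':  # pragma: no cover - example only
    from fastapi import FastAPI
    from docarray.documents import TextDoc

    app = FastAPI()

    @app.post('/doc/', response_model=TextDoc, response_class=DocArrayResponse)
    async def create_item(doc: TextDoc) -> TextDoc:
        return doc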
|
from typing import TYPE_CHECKING, Any
from docarray.base_doc.io.json import orjson_dumps
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from fastapi.responses import JSONResponse
else:
fastapi = import_library('fastapi', raise_error=True)
JSONResponse = fastapi.responses.JSONResponse
class DocArrayResponse(JSONResponse):
"""
This is a custom Response class for FastAPI and starlette. This is needed
to handle serialization of the Document types when using FastAPI
---
```python
    from docarray.documents import Text
from docarray.base_doc import DocResponse
@app.post("/doc/", response_model=Text, response_class=DocResponse)
async def create_item(doc: Text) -> Text:
return doc
```
---
"""
def render(self, content: Any) -> bytes:
return orjson_dumps(content)
|
import argparse
import os
import shlex
import subprocess
def execute_command(command):
command_list = shlex.split(command)
subprocess.run(command_list, check=True, text=True)
def main():
comment = os.environ["COMMENT"].splitlines()[0].strip()
# Extract the command-line arguments from the comment
prefix = "@scikit-learn-bot update lock-files"
assert comment.startswith(prefix)
all_args_list = shlex.split(comment[len(prefix) :])
# Parse the options for the lock-file script
parser = argparse.ArgumentParser()
parser.add_argument("--select-build", default="")
parser.add_argument("--skip-build", default=None)
parser.add_argument("--select-tag", default=None)
args, extra_args_list = parser.parse_known_args(all_args_list)
# Rebuild the command-line arguments for the lock-file script
args_string = ""
if args.select_build != "":
args_string += f" --select-build {args.select_build}"
if args.skip_build is not None:
args_string += f" --skip-build {args.skip_build}"
if args.select_tag is not None:
args_string += f" --select-tag {args.select_tag}"
# Parse extra arguments
extra_parser = argparse.ArgumentParser()
extra_parser.add_argument("--commit-marker", default=None)
extra_args, _ = extra_parser.parse_known_args(extra_args_list)
marker = ""
# Additional markers based on the tag
if args.select_tag == "main-ci":
marker += "[doc build] "
elif args.select_tag == "scipy-dev":
marker += "[scipy-dev] "
elif args.select_tag == "arm":
marker += "[cirrus arm] "
elif len(all_args_list) == 0:
        # No arguments given: all lock files will be updated, so add all markers
marker += "[doc build] [scipy-dev] [cirrus arm] "
# The additional `--commit-marker` argument
if extra_args.commit_marker is not None:
marker += extra_args.commit_marker + " "
execute_command(
f"python build_tools/update_environments_and_lock_files.py{args_string}"
)
execute_command('git config --global user.name "scikit-learn-bot"')
execute_command('git config --global user.email "[email protected]"')
execute_command("git add -A")
    # Avoid committing the scripts that are downloaded from main
execute_command("git reset build_tools/shared.sh")
execute_command("git reset build_tools/update_environments_and_lock_files.py")
execute_command(
"git reset build_tools/on_pr_comment_update_environments_and_lock_files.py"
)
# Using --allow-empty to handle cases where the lock-file has not changed
execute_command(f'git commit --allow-empty -m "{marker}Update lock files"')
execute_command("git push")
if __name__ == "__main__":
main()
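# Hedged example (illustrative only) of a PR comment this script can parse:
#   @scikit-learn-bot update lock-files --select-tag scipy-dev --commit-marker "[skip ci]"
# This rebuilds the scipy-dev lock files and prepends both "[scipy-dev]" and
# "[skip ci]" to the resulting commit message.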
|
import argparse
import os
import shlex
import subprocess
def execute_command(command):
command_list = shlex.split(command)
subprocess.run(command_list, check=True, text=True)
def main():
comment = os.environ["COMMENT"].splitlines()[0].strip()
# Extract the command-line arguments from the comment
prefix = "@scikit-learn-bot update lock-files"
assert comment.startswith(prefix)
all_args_list = shlex.split(comment[len(prefix) :])
# Parse the options for the lock-file script
parser = argparse.ArgumentParser()
parser.add_argument("--select-build", default="")
parser.add_argument("--skip-build", default=None)
parser.add_argument("--select-tag", default=None)
args, extra_args_list = parser.parse_known_args(all_args_list)
# Rebuild the command-line arguments for the lock-file script
args_string = ""
if args.select_build != "":
args_string += f" --select-build {args.select_build}"
if args.skip_build is not None:
args_string += f" --skip-build {args.skip_build}"
if args.select_tag is not None:
args_string += f" --select-tag {args.select_tag}"
# Parse extra arguments
extra_parser = argparse.ArgumentParser()
extra_parser.add_argument("--commit-marker", default=None)
extra_args, _ = extra_parser.parse_known_args(extra_args_list)
marker = ""
# Additional markers based on the tag
if args.select_tag == "main-ci":
marker += "[doc build] "
elif args.select_tag == "scipy-dev":
marker += "[scipy-dev] "
elif args.select_tag == "arm":
marker += "[cirrus arm] "
elif len(all_args_list) == 0:
        # No arguments given: all lock files will be updated, so add all markers
marker += "[doc build] [scipy-dev] [cirrus arm] "
# The additional `--commit-marker` argument
if extra_args.commit_marker is not None:
marker += extra_args.commit_marker + " "
execute_command(
f"python build_tools/update_environments_and_lock_files.py{args_string}"
)
execute_command('git config --global user.name "scikit-learn-bot"')
execute_command('git config --global user.email "[email protected]"')
execute_command("git add -A")
    # Avoid committing the scripts that are downloaded from main
execute_command("git reset build_tools/shared.sh")
execute_command("git reset build_tools/update_environments_and_lock_files.py")
execute_command(
"git reset build_tools/on_pr_comment_update_environments_and_lock_files.py"
)
execute_command(f'git commit -m "{marker}Update lock files"')
execute_command("git push")
if __name__ == "__main__":
main()
|
import os
import time
import pytest
from jina import Document, DocumentArray
from ..redis_storage import RedisStorage
@pytest.fixture(scope='function')
def indexer():
return RedisStorage()
@pytest.fixture()
def docker_compose(request):
os.system(
f'docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans'
)
time.sleep(5)
yield
os.system(
f'docker-compose -f {request.param} --project-directory . down --remove-orphans'
)
@pytest.fixture(scope='function')
def docs():
return DocumentArray(
[
Document(content=value)
for value in ['cat', 'dog', 'crow', 'pikachu', 'magikarp']
]
)
|
import os
import time
from jina import Document, DocumentArray
import pytest
from ..redis_storage import RedisStorage
@pytest.fixture(scope='function')
def indexer():
return RedisStorage()
@pytest.fixture()
def docker_compose(request):
os.system(
f'docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans'
)
time.sleep(5)
yield
os.system(
f'docker-compose -f {request.param} --project-directory . down --remove-orphans'
)
@pytest.fixture(scope='function')
def docs():
return DocumentArray([
Document(content=value)
for value in ['cat', 'dog', 'crow', 'pikachu', 'magikarp']
])
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE.``
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
    :param default_language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param cpu: if True, forces the use of the CPU even when a GPU is available.
:param default_batch_size: size of each batch
:param default_traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
default_language: str = 'en',
cpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.default_language = default_language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': cpu},
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_path``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
language = parameters.get('language', self.default_language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
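# Hedged usage sketch (illustrative, not part of the original module): encoding a
# small DocumentArray directly with the executor. Model files are downloaded on
# first use, and the embedding dimensionality is whatever LASER produces.
if __name__ == '__main__':  # pragma: no cover - example only
    from jina import Document

    encoder = LaserEncoder(download_data=True)
    docs = DocumentArray([Document(text='hello world')])
    encoder.encode(docs, parameters={})
    print(docs[0].embedding.shape)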
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE.``
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
    :param default_language: The default language of the text. Can be overridden by a
request parameter.
:param cpu: if True, forces the use of the CPU even when a GPU is available.
:param default_batch_size: size of each batch
:param default_traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
default_language: str = 'en',
cpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.default_language = default_language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': cpu},
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
language = parameters.get('language', self.default_language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for TimmWrapper models"""
from typing import Any
from ...configuration_utils import PretrainedConfig
from ...utils import is_timm_available, logging, requires_backends
if is_timm_available():
from timm.data import ImageNetInfo, infer_imagenet_subset
logger = logging.get_logger(__name__)
class TimmWrapperConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`].
It is used to instantiate a timm model according to the specified arguments, defining the model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
    The config loads ImageNet label descriptions and stores them in the `id2label` attribute; the `label2id` attribute for
    default ImageNet models is set to `None` because some label descriptions are not unique.
Args:
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
do_pooling (`bool`, *optional*, defaults to `True`):
Whether to do pooling for the last_hidden_state in `TimmWrapperModel` or not.
Example:
```python
>>> from transformers import TimmWrapperModel
>>> # Initializing a timm model
>>> model = TimmWrapperModel.from_pretrained("timm/resnet18.a1_in1k")
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "timm_wrapper"
def __init__(self, initializer_range: float = 0.02, do_pooling: bool = True, **kwargs):
self.initializer_range = initializer_range
self.do_pooling = do_pooling
super().__init__(**kwargs)
@classmethod
def from_dict(cls, config_dict: dict[str, Any], **kwargs):
label_names = config_dict.get("label_names", None)
is_custom_model = "num_labels" in kwargs or "id2label" in kwargs
# if no labels added to config, use imagenet labeller in timm
if label_names is None and not is_custom_model:
requires_backends(cls, ["timm"])
imagenet_subset = infer_imagenet_subset(config_dict)
if imagenet_subset:
dataset_info = ImageNetInfo(imagenet_subset)
synsets = dataset_info.label_names()
label_descriptions = dataset_info.label_descriptions(as_dict=True)
label_names = [label_descriptions[synset] for synset in synsets]
if label_names is not None and not is_custom_model:
kwargs["id2label"] = dict(enumerate(label_names))
# if all label names are unique, create label2id mapping as well
if len(set(label_names)) == len(label_names):
kwargs["label2id"] = {name: i for i, name in enumerate(label_names)}
else:
kwargs["label2id"] = None
# timm config stores the `num_classes` attribute in both the root of config and in the "pretrained_cfg" dict.
# We are removing these attributes in order to have the native `transformers` num_labels attribute in config
# and to avoid duplicate attributes
num_labels_in_kwargs = kwargs.pop("num_labels", None)
num_labels_in_dict = config_dict.pop("num_classes", None)
# passed num_labels has priority over num_classes in config_dict
kwargs["num_labels"] = num_labels_in_kwargs or num_labels_in_dict
# pop num_classes from "pretrained_cfg",
# it is not necessary to have it, only root one is used in timm
if "pretrained_cfg" in config_dict and "num_classes" in config_dict["pretrained_cfg"]:
config_dict["pretrained_cfg"].pop("num_classes", None)
return super().from_dict(config_dict, **kwargs)
def to_dict(self) -> dict[str, Any]:
output = super().to_dict()
output["num_classes"] = self.num_labels
output["label_names"] = list(self.id2label.values())
output.pop("id2label", None)
output.pop("label2id", None)
return output
__all__ = ["TimmWrapperConfig"]
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for TimmWrapper models"""
from typing import Any, Dict
from ...configuration_utils import PretrainedConfig
from ...utils import is_timm_available, logging, requires_backends
if is_timm_available():
from timm.data import ImageNetInfo, infer_imagenet_subset
logger = logging.get_logger(__name__)
class TimmWrapperConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`].
It is used to instantiate a timm model according to the specified arguments, defining the model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
    The config loads ImageNet label descriptions and stores them in the `id2label` attribute; the `label2id` attribute for
    default ImageNet models is set to `None` because some label descriptions are not unique.
Args:
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
do_pooling (`bool`, *optional*, defaults to `True`):
Whether to do pooling for the last_hidden_state in `TimmWrapperModel` or not.
Example:
```python
>>> from transformers import TimmWrapperModel
>>> # Initializing a timm model
>>> model = TimmWrapperModel.from_pretrained("timm/resnet18.a1_in1k")
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "timm_wrapper"
def __init__(self, initializer_range: float = 0.02, do_pooling: bool = True, **kwargs):
self.initializer_range = initializer_range
self.do_pooling = do_pooling
super().__init__(**kwargs)
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs):
label_names = config_dict.get("label_names", None)
is_custom_model = "num_labels" in kwargs or "id2label" in kwargs
# if no labels added to config, use imagenet labeller in timm
if label_names is None and not is_custom_model:
requires_backends(cls, ["timm"])
imagenet_subset = infer_imagenet_subset(config_dict)
if imagenet_subset:
dataset_info = ImageNetInfo(imagenet_subset)
synsets = dataset_info.label_names()
label_descriptions = dataset_info.label_descriptions(as_dict=True)
label_names = [label_descriptions[synset] for synset in synsets]
if label_names is not None and not is_custom_model:
kwargs["id2label"] = dict(enumerate(label_names))
# if all label names are unique, create label2id mapping as well
if len(set(label_names)) == len(label_names):
kwargs["label2id"] = {name: i for i, name in enumerate(label_names)}
else:
kwargs["label2id"] = None
# timm config stores the `num_classes` attribute in both the root of config and in the "pretrained_cfg" dict.
# We are removing these attributes in order to have the native `transformers` num_labels attribute in config
# and to avoid duplicate attributes
num_labels_in_kwargs = kwargs.pop("num_labels", None)
num_labels_in_dict = config_dict.pop("num_classes", None)
# passed num_labels has priority over num_classes in config_dict
kwargs["num_labels"] = num_labels_in_kwargs or num_labels_in_dict
# pop num_classes from "pretrained_cfg",
# it is not necessary to have it, only root one is used in timm
if "pretrained_cfg" in config_dict and "num_classes" in config_dict["pretrained_cfg"]:
config_dict["pretrained_cfg"].pop("num_classes", None)
return super().from_dict(config_dict, **kwargs)
def to_dict(self) -> Dict[str, Any]:
output = super().to_dict()
output["num_classes"] = self.num_labels
output["label_names"] = list(self.id2label.values())
output.pop("id2label", None)
output.pop("label2id", None)
return output
__all__ = ["TimmWrapperConfig"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
        help='If there is no display interface, you can save the visualizations to this directory')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
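# Hedged CLI usage sketch (not part of the original script); the config path below
# is a placeholder for a real mmdet config:
#   python browse_dataset.py configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py \
#       --output-dir ./vis --not-show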
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
        help='If there is no display interface, you can save the visualizations to this directory')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
from functools import wraps
from typing import TYPE_CHECKING, List
from jina.excepts import FlowBuildLevelError
# noinspection PyUnreachableCode
if TYPE_CHECKING:
from jina.enums import FlowBuildLevel
from jina.orchestrate.flow.base import Flow
def allowed_levels(levels: List['FlowBuildLevel']):
"""Annotate a function so that it requires certain build level to run.
Example:
.. highlight:: python
.. code-block:: python
        @allowed_levels([FlowBuildLevel.RUNTIME])
def foo():
print(1)
:param levels: required build level to run this function.
:return: annotated function
"""
def __build_level(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if hasattr(self, '_build_level'):
if self._build_level in levels:
return func(self, *args, **kwargs)
else:
raise FlowBuildLevelError(
f'build_level check failed for {func!r}, required level: {levels}, actual level: {self._build_level}'
)
else:
raise AttributeError(f'{self!r} has no attribute "_build_level"')
return arg_wrapper
return __build_level
def _hanging_deployments(op_flow: 'Flow') -> List[str]:
"""
:param op_flow: the Flow we're operating on
:return: names of floating Deployments (nobody recv from them) in the Flow.
"""
all_needs = {k for p, v in op_flow for k in v.needs}
all_names = {p for p, v in op_flow if not v.args.floating}
# all_names is always a superset of all_needs
return list(all_names.difference(all_needs))
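# Hedged usage sketch (illustrative, not part of the original module): guarding a
# method so it only runs once the object reports a matching build level.
# `FlowBuildLevel.GRAPH` is assumed to be a member of the jina enum.
if __name__ == '__main__':  # pragma: no cover - example only
    from jina.enums import FlowBuildLevel

    class _Demo:
        _build_level = FlowBuildLevel.GRAPH

        @allowed_levels([FlowBuildLevel.GRAPH])
        def ready(self):
            return True

    print(_Demo().ready())  # True, because the build level matches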
|
from functools import wraps
from typing import TYPE_CHECKING, List
from jina.excepts import FlowBuildLevelError
# noinspection PyUnreachableCode
if TYPE_CHECKING:
from jina.enums import FlowBuildLevel
from jina.orchestrate.flow.base import Flow
def allowed_levels(levels: List['FlowBuildLevel']):
"""Annotate a function so that it requires certain build level to run.
Example:
.. highlight:: python
.. code-block:: python
        @allowed_levels([FlowBuildLevel.RUNTIME])
def foo():
print(1)
:param levels: required build level to run this function.
:return: annotated function
"""
def __build_level(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if hasattr(self, '_build_level'):
if self._build_level in levels:
return func(self, *args, **kwargs)
else:
raise FlowBuildLevelError(
f'build_level check failed for {func!r}, required level: {levels}, actual level: {self._build_level}'
)
else:
raise AttributeError(f'{self!r} has no attribute "_build_level"')
return arg_wrapper
return __build_level
def _hanging_deployments(op_flow: 'Flow') -> List[str]:
"""
:param op_flow: the Flow we're operating on
:return: names of hanging Deployments (nobody recv from them) in the Flow.
"""
all_needs = {k for p, v in op_flow for k in v.needs}
all_names = {p for p, v in op_flow if not v.args.floating}
# all_names is always a superset of all_needs
return list(all_names.difference(all_needs))
|
from typing import Any, Dict
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
.. betastatus:: UniformTemporalSubsample transform
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
    When ``num_samples`` is larger than the size of the temporal dimension of the video, it
will sample frames based on nearest neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (is_simple_tensor, datapoints.Video)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints._VideoType, params: Dict[str, Any]) -> datapoints._VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
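# Hedged usage sketch (illustrative, not part of the original module): subsampling
# 4 frames from an 8-frame video tensor of shape [T, C, H, W].
if __name__ == "__main__":  # pragma: no cover - example only
    import torch

    video = torch.rand(8, 3, 32, 32)
    subsample = UniformTemporalSubsample(num_samples=4)
    print(subsample(video).shape)  # torch.Size([4, 3, 32, 32])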
|
from typing import Any, Dict
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
_transformed_types = (is_simple_tensor, datapoints.Video)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints._VideoType, params: Dict[str, Any]) -> datapoints._VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
|
# pylint: disable=protected-access
"""Shared typing definition."""
import ctypes
import os
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
# os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/
# cudf.DataFrame/cupy.array/dlpack
import numpy as np
DataType = Any
FeatureInfo = Sequence[str]
FeatureNames = FeatureInfo
FeatureTypes = FeatureInfo
BoosterParam = Union[List, Dict[str, Any]] # better be sequence
ArrayLike = Any
if TYPE_CHECKING:
PathLike = Union[str, os.PathLike[str]]
else:
PathLike = Union[str, os.PathLike]
CupyT = ArrayLike # maybe need a stub for cupy arrays
NumpyOrCupy = Any
NumpyDType = Union[str, Type[np.number]] # pylint: disable=invalid-name
PandasDType = Any # real type is pandas.core.dtypes.base.ExtensionDtype
FloatCompatible = Union[float, np.float32, np.float64]
# typing.SupportsInt is not suitable here since floating point values are convertible to
# integers as well.
Integer = Union[int, np.integer]
IterationRange = Tuple[Integer, Integer]
# callables
FPreProcCallable = Callable
# ctypes
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64 # pylint: disable=C0103
ModelIn = Union[os.PathLike[AnyStr], bytearray, str]
CTypeT = TypeVar(
"CTypeT",
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_uint,
ctypes.c_size_t,
)
# supported numeric types
CNumeric = Union[
ctypes.c_float,
ctypes.c_double,
ctypes.c_uint,
ctypes.c_uint64,
ctypes.c_int32,
ctypes.c_int64,
]
# c pointer types
if TYPE_CHECKING:
CStrPtr = ctypes._Pointer[ctypes.c_char]
CStrPptr = ctypes._Pointer[ctypes.c_char_p]
CFloatPtr = ctypes._Pointer[ctypes.c_float]
CNumericPtr = Union[
ctypes._Pointer[ctypes.c_float],
ctypes._Pointer[ctypes.c_double],
ctypes._Pointer[ctypes.c_uint],
ctypes._Pointer[ctypes.c_uint64],
ctypes._Pointer[ctypes.c_int32],
ctypes._Pointer[ctypes.c_int64],
]
else:
CStrPtr = ctypes._Pointer
CStrPptr = ctypes._Pointer
CFloatPtr = ctypes._Pointer
CNumericPtr = Union[
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
]
# The second arg is actually Optional[List[cudf.Series]], skipped for easier type check.
# The cudf Series is the obtained cat codes, preserved in the `DataIter` to prevent it
# being freed.
TransformedData = Tuple[Any, Optional[FeatureNames], Optional[FeatureTypes]]
# template parameter
_T = TypeVar("_T")
_F = TypeVar("_F", bound=Callable[..., Any])
|
# pylint: disable=protected-access
"""Shared typing definition."""
import ctypes
import os
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
# os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/
# cudf.DataFrame/cupy.array/dlpack
import numpy as np
DataType = Any
FeatureInfo = Sequence[str]
FeatureNames = FeatureInfo
FeatureTypes = FeatureInfo
BoosterParam = Union[List, Dict[str, Any]] # better be sequence
ArrayLike = Any
if TYPE_CHECKING:
PathLike = Union[str, os.PathLike[str]]
else:
PathLike = Union[str, os.PathLike]
CupyT = ArrayLike # maybe need a stub for cupy arrays
NumpyOrCupy = Any
NumpyDType = Union[str, Type[np.number]] # pylint: disable=invalid-name
PandasDType = Any # real type is pandas.core.dtypes.base.ExtensionDtype
FloatCompatible = Union[float, np.float32, np.float64]
# typing.SupportsInt is not suitable here since floating point values are convertible to
# integers as well.
Integer = Union[int, np.integer]
IterationRange = Tuple[Integer, Integer]
# callables
FPreProcCallable = Callable
# ctypes
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64 # pylint: disable=C0103
ModelIn = Union[os.PathLike[AnyStr], bytearray, str]
CTypeT = TypeVar(
"CTypeT",
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_uint,
ctypes.c_size_t,
)
# supported numeric types
CNumeric = Union[
ctypes.c_float,
ctypes.c_double,
ctypes.c_uint,
ctypes.c_uint64,
ctypes.c_int32,
ctypes.c_int64,
]
# c pointer types
if TYPE_CHECKING:
CStrPtr = ctypes._Pointer[ctypes.c_char]
CStrPptr = ctypes._Pointer[ctypes.c_char_p]
CFloatPtr = ctypes._Pointer[ctypes.c_float]
CNumericPtr = Union[
ctypes._Pointer[ctypes.c_float],
ctypes._Pointer[ctypes.c_double],
ctypes._Pointer[ctypes.c_uint],
ctypes._Pointer[ctypes.c_uint64],
ctypes._Pointer[ctypes.c_int32],
ctypes._Pointer[ctypes.c_int64],
]
else:
CStrPtr = ctypes._Pointer
CStrPptr = ctypes._Pointer
CFloatPtr = ctypes._Pointer
CNumericPtr = Union[
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
]
# The second arg is actually Optional[List[cudf.Series]], skipped for easier type check.
# The cudf Series is the obtained cat codes, preserved in the `DataIter` to prevent it
# being freed.
TransformedData = Tuple[
Any, Optional[List], Optional[FeatureNames], Optional[FeatureTypes]
]
# template parameter
_T = TypeVar("_T")
_F = TypeVar("_F", bound=Callable[..., Any])
|
import os
import random
import time
from typing import Dict, OrderedDict
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 100
METRIC = 'cosine'
class TagMatchMerger(Executor):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
if docs_matrix:
# noinspection PyTypeHints
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
limit = parameters.get('limit')
if limit:
limit = int(limit)
for doc in results.values():
doc.matches = sorted(
doc.matches,
key=lambda m: m.scores[METRIC].value,
reverse=True,
)[:limit]
docs = DocumentArray(list(results.values()))
return docs
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class FaissTaggingFileSearcher(FaissLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim]).astype(dtype=np.float32)
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
@pytest.mark.parametrize('num_shards', (2, 3, 7))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'limit': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
|
import os
import random
import time
from typing import Dict, OrderedDict
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 100
METRIC = 'cosine'
class TagMatchMerger(Executor):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
if docs_matrix:
# noinspection PyTypeHints
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
top_k = parameters.get('top_k')
if top_k:
top_k = int(top_k)
for doc in results.values():
doc.matches = sorted(
doc.matches,
key=lambda m: m.scores[METRIC].value,
reverse=True,
)[:top_k]
docs = DocumentArray(list(results.values()))
return docs
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class FaissTaggingFileSearcher(FaissLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim]).astype(dtype=np.float32)
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
@pytest.mark.parametrize('num_shards', (2, 3, 7))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
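# The test loads its topology from a 'flow.yml' that is not included in this
# file. A rough, assumed sketch of what that config needs to provide for the
# test to pass: a pod named 'index' using the two executors defined above,
# sharded via the SHARDS env var and replicated so rolling_update can swap
# replicas in place. The exact YAML keys are an assumption, not the original
# file:
#
#     jtype: Flow
#     pods:
#       - name: index
#         uses: FaissTaggingFileSearcher
#         uses_after: TagMatchMerger
#         shards: ${{ ENV.SHARDS }}
#         replicas: 2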
|
"""Simple reader for mbox (mailbox) files."""
import os
from pathlib import Path
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.readers.file import MboxReader as MboxFileReader
from llama_index.core.schema import Document
class MboxReader(BaseReader):
"""
Mbox e-mail reader.
Reads a set of e-mails saved in the mbox format.
"""
def __init__(self) -> None:
"""Initialize."""
def load_data(self, input_dir: str, **load_kwargs: Any) -> List[Document]:
"""
Load data from the input directory.
load_kwargs:
max_count (int): Maximum amount of messages to read.
message_format (str): Message format overriding default.
"""
docs: List[Document] = []
for dirpath, dirnames, filenames in os.walk(input_dir):
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
for filename in filenames:
if filename.endswith(".mbox"):
filepath = os.path.join(dirpath, filename)
file_docs = MboxFileReader(**load_kwargs).load_data(Path(filepath))
docs.extend(file_docs)
return docs
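# A minimal usage sketch. The './mail' directory below is illustrative only;
# point it at any folder tree containing .mbox files. max_count is forwarded,
# like any other load_kwargs, to the underlying file-based MboxReader (it is
# the kwarg documented in load_data above).
if __name__ == "__main__":
    reader = MboxReader()
    documents = reader.load_data(input_dir="./mail", max_count=100)
    print(f"Loaded {len(documents)} messages")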
|
"""Simple reader for mbox (mailbox) files."""
import os
from pathlib import Path
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.readers.file import MboxReader as MboxFileReader
from llama_index.core.schema import Document
class MboxReader(BaseReader):
"""Mbox e-mail reader.
Reads a set of e-mails saved in the mbox format.
"""
def __init__(self) -> None:
"""Initialize."""
def load_data(self, input_dir: str, **load_kwargs: Any) -> List[Document]:
"""Load data from the input directory.
load_kwargs:
max_count (int): Maximum amount of messages to read.
message_format (str): Message format overriding default.
"""
docs: List[Document] = []
for dirpath, dirnames, filenames in os.walk(input_dir):
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
for filename in filenames:
if filename.endswith(".mbox"):
filepath = os.path.join(dirpath, filename)
file_docs = MboxFileReader(**load_kwargs).load_data(Path(filepath))
docs.extend(file_docs)
return docs
|
"""Simple Reader that reads transcript of youtube video."""
import re
from typing import Any, List, Optional
from youtube_transcript_api import YouTubeTranscriptApi
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from llama_index.readers.youtube_transcript.utils import YOUTUBE_URL_PATTERNS
class YoutubeTranscriptReader(BasePydanticReader):
"""Youtube Transcript reader."""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "YoutubeTranscriptReader"
def load_data(
self,
ytlinks: List[str],
languages: Optional[List[str]] = ["en"],
**load_kwargs: Any,
) -> List[Document]:
"""
Load data from the input directory.
Args:
pages (List[str]): List of youtube links \
for which transcripts are to be read.
"""
results = []
for link in ytlinks:
video_id = self._extract_video_id(link)
if not video_id:
                raise ValueError(
                    f"Supplied url {link} is not a supported youtube URL. "
                    "Supported formats include:\n"
                    "  youtube.com/watch?v={video_id} (with or without 'www.')\n"
                    "  youtube.com/embed?v={video_id} (with or without 'www.')\n"
                    "  youtu.be/{video_id} (never includes www subdomain)"
                )
transcript_chunks = YouTubeTranscriptApi.get_transcript(
video_id, languages=languages
)
chunk_text = [chunk["text"] for chunk in transcript_chunks]
transcript = "\n".join(chunk_text)
results.append(
Document(
text=transcript, id_=video_id, extra_info={"video_id": video_id}
)
)
return results
@staticmethod
def _extract_video_id(yt_link) -> Optional[str]:
for pattern in YOUTUBE_URL_PATTERNS:
match = re.search(pattern, yt_link)
if match:
return match.group(1)
# return None if no match is found
return None
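# A minimal usage sketch. The watch URL below is only an illustration; any
# youtube.com/watch, youtube.com/embed or youtu.be link accepted by
# _extract_video_id will work, provided the video has a transcript available.
if __name__ == "__main__":
    reader = YoutubeTranscriptReader()
    docs = reader.load_data(
        ytlinks=["https://www.youtube.com/watch?v=dQw4w9WgXcQ"],
        languages=["en"],
    )
    for doc in docs:
        print(doc.id_, len(doc.text))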
|
"""Simple Reader that reads transcript of youtube video."""
import re
from typing import Any, List, Optional
from youtube_transcript_api import YouTubeTranscriptApi
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from llama_index.readers.youtube_transcript.utils import YOUTUBE_URL_PATTERNS
class YoutubeTranscriptReader(BasePydanticReader):
"""Youtube Transcript reader."""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "YoutubeTranscriptReader"
def load_data(
self,
ytlinks: List[str],
languages: Optional[List[str]] = ["en"],
**load_kwargs: Any,
) -> List[Document]:
"""
Load data from the input directory.
Args:
pages (List[str]): List of youtube links \
for which transcripts are to be read.
"""
results = []
for link in ytlinks:
video_id = self._extract_video_id(link)
if not video_id:
                raise ValueError(
                    f"Supplied url {link} is not a supported youtube URL. "
                    "Supported formats include:\n"
                    "  youtube.com/watch?v={video_id} (with or without 'www.')\n"
                    "  youtube.com/embed?v={video_id} (with or without 'www.')\n"
                    "  youtu.be/{video_id} (never includes www subdomain)"
                )
transcript_chunks = YouTubeTranscriptApi.get_transcript(
video_id, languages=languages
)
chunk_text = [chunk["text"] for chunk in transcript_chunks]
transcript = "\n".join(chunk_text)
results.append(
Document(
text=transcript, id_=video_id, extra_info={"video_id": video_id}
)
)
return results
@staticmethod
def _extract_video_id(yt_link) -> Optional[str]:
for pattern in YOUTUBE_URL_PATTERNS:
match = re.search(pattern, yt_link)
if match:
return match.group(1)
# return None if no match is found
return None
|