input (string, 33–5k chars) | output (string, 32–5k chars) |
---|---|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(fn, (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, PatchMerging, Transformer,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerDecoder', 'Transformer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, PatchMerging, Transformer,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerDecoder', 'Transformer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention'
]
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata['image_type'] = 'uri'
doc.load_uri_to_image_tensor()
elif isinstance(value, np.ndarray):
doc.tensor = value
doc._metadata['image_type'] = 'ndarray'
else:
from PIL.Image import Image
if isinstance(value, Image):
doc.tensor = np.array(value)
doc._metadata['image_type'] = 'PIL'
return doc
def text_setter(value) -> 'Document':
from docarray import Document
return Document(text=value, modality='text')
def uri_setter(value) -> 'Document':
from docarray import Document
return Document(uri=value)
def audio_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'audio_type': 'ndarray'})
else:
return Document(
uri=value, modality='audio', _metadata={'audio_type': 'uri'}
).load_uri_to_audio_tensor()
def video_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'video_type': 'ndarray'})
else:
return Document(
uri=value, modality='video', _metadata={'video_type': 'uri'}
).load_uri_to_video_tensor()
def mesh_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'mesh_type': 'ndarray'})
else:
return Document(
uri=value, modality='mesh', _metadata={'mesh_type': 'uri'}
).load_uri_to_point_cloud_tensor(1000)
def blob_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, bytes):
return Document(blob=value, _metadata={'blob_type': 'bytes'})
else:
return Document(uri=value, _metadata={'blob_type': 'uri'}).load_uri_to_blob()
def json_setter(value) -> 'Document':
from docarray import Document
return Document(modality='json', tags=value)
def tabular_setter(value) -> 'Document':
from docarray import Document, DocumentArray
return Document(uri=value, chunks=DocumentArray.from_csv(value), modality='tabular')
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata['image_type'] = 'uri'
doc.load_uri_to_image_tensor()
elif isinstance(value, np.ndarray):
doc.tensor = value
doc._metadata['image_type'] = 'ndarray'
else:
from PIL.Image import Image
if isinstance(value, Image):
doc.tensor = np.array(value)
doc._metadata['image_type'] = 'PIL'
return doc
def text_setter(value) -> 'Document':
from docarray import Document
return Document(text=value, modality='text')
def uri_setter(value) -> 'Document':
from docarray import Document
return Document(uri=value)
def audio_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'audio_type': 'ndarray'})
else:
return Document(
uri=value, modality='audio', _metadata={'audio_type': 'uri'}
).load_uri_to_audio_tensor()
def video_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'video_type': 'ndarray'})
else:
return Document(
uri=value, modality='video', _metadata={'video_type': 'uri'}
).load_uri_to_video_tensor()
def mesh_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'mesh_type': 'ndarray'})
else:
return Document(
uri=value, modality='mesh', _metadata={'mesh_type': 'uri'}
).load_uri_to_point_cloud_tensor(1000)
def blob_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, bytes):
return Document(blob=value, _metadata={'blob_type': 'bytes'})
else:
return Document(uri=value, _metadata={'blob_type': 'uri'}).load_uri_to_blob()
def json_setter(value) -> 'Document':
from docarray import Document
return Document(modality='json', tags=value)
def tabular_setter(value) -> 'Document':
from docarray import Document, DocumentArray
return Document(uri=value, chunks=DocumentArray.from_csv(value), modality='tabular')
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.8"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.7"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
import torchaudio
from .backend import Backend
from .common import AudioMetaData
sox_ext = torchaudio._extension.lazy_import_sox_ext()
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support reading from file-like objects. ",
"Please use an alternative backend that does support reading from file-like objects, e.g. FFmpeg.",
)
else:
sinfo = sox_ext.get_info(uri, format)
if sinfo:
return AudioMetaData(*sinfo)
else:
raise RuntimeError(f"Failed to fetch metadata for {uri}.")
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support loading from file-like objects. ",
"Please use an alternative backend that does support loading from file-like objects, e.g. FFmpeg.",
)
else:
ret = sox_ext.load_audio_file(uri, frame_offset, num_frames, normalize, channels_first, format)
if not ret:
raise RuntimeError(f"Failed to load audio from {uri}.")
return ret
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
) -> None:
if hasattr(uri, "write"):
raise ValueError(
"SoX backend does not support writing to file-like objects. ",
"Please use an alternative backend that does support writing to file-like objects, e.g. FFmpeg.",
)
else:
sox_ext.save_audio_file(
uri,
src,
sample_rate,
channels_first,
None,
format,
encoding,
bits_per_sample,
)
@staticmethod
def can_decode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "read")
@staticmethod
def can_encode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "write")
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
from .backend import Backend
from .common import AudioMetaData
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support reading from file-like objects. ",
"Please use an alternative backend that does support reading from file-like objects, e.g. FFmpeg.",
)
else:
sinfo = torch.ops.torchaudio.sox_io_get_info(uri, format)
if sinfo:
return AudioMetaData(*sinfo)
else:
raise RuntimeError(f"Failed to fetch metadata for {uri}.")
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support loading from file-like objects. ",
"Please use an alternative backend that does support loading from file-like objects, e.g. FFmpeg.",
)
else:
ret = torch.ops.torchaudio.sox_io_load_audio_file(
uri, frame_offset, num_frames, normalize, channels_first, format
)
if not ret:
raise RuntimeError(f"Failed to load audio from {uri}.")
return ret
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
) -> None:
if hasattr(uri, "write"):
raise ValueError(
"SoX backend does not support writing to file-like objects. ",
"Please use an alternative backend that does support writing to file-like objects, e.g. FFmpeg.",
)
else:
torch.ops.torchaudio.sox_io_save_audio_file(
uri,
src,
sample_rate,
channels_first,
None,
format,
encoding,
bits_per_sample,
)
@staticmethod
def can_decode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "read")
@staticmethod
def can_encode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "write")
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=128),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=128),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow
from ...minranker import MinRanker
def test_integration(documents_chunk):
with Flow().add(uses=MinRanker, uses_with={'metric': 'cosine'}) as flow:
resp = flow.post(on='/search', inputs=documents_chunk, return_results=True)
for r in resp:
for doc in r.docs:
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow
from ...minranker import MinRanker
def test_integration(documents_chunk):
with Flow().add(uses=MinRanker, override_with={'metric': 'cosine'}) as flow:
resp = flow.post(on='/search', inputs=documents_chunk, return_results=True)
for r in resp:
for doc in r.docs:
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
|
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='FreeAnchorRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='FreeAnchorRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseEmbeddingSimilarityEvaluator(EmbeddingSimilarityEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
scores: list[float],
batch_size: int = 16,
main_similarity: str | SimilarityFunction | None = None,
similarity_fn_names: list[Literal["cosine", "euclidean", "manhattan", "dot"]] | None = None,
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] | None = None,
truncate_dim: int | None = None,
):
return super().__init__(
sentences1=sentences1,
sentences2=sentences2,
scores=scores,
batch_size=batch_size,
main_similarity=main_similarity,
similarity_fn_names=similarity_fn_names,
name=name,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
precision=precision,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
precision=self.precision,
normalize_embeddings=bool(self.precision),
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self.name, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseEmbeddingSimilarityEvaluator(EmbeddingSimilarityEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
scores: list[float],
batch_size: int = 16,
main_similarity: str | SimilarityFunction | None = None,
similarity_fn_names: list[Literal["cosine", "euclidean", "manhattan", "dot"]] | None = None,
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] | None = None,
truncate_dim: int | None = None,
):
super().__init__(
sentences1,
sentences2,
scores,
batch_size,
main_similarity,
similarity_fn_names,
name,
show_progress_bar,
write_csv,
precision,
truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
precision=self.precision,
normalize_embeddings=bool(self.precision),
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import (FilterAnnotations, LoadImageFromFile,
LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', channel_order='bgr', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
def _build_filter_annotations_args():
kwargs = (dict(min_gt_bbox_wh=(100, 100)),
dict(min_gt_bbox_wh=(100, 100), keep_empty=False),
dict(min_gt_bbox_wh=(1, 1)), dict(min_gt_bbox_wh=(.01, .01)),
dict(min_gt_bbox_wh=(.01, .01),
by_mask=True), dict(by_mask=True),
dict(by_box=False, by_mask=True))
targets = (None, 0, 1, 2, 1, 1, 1)
return list(zip(targets, kwargs))
@pytest.mark.parametrize('target, kwargs', _build_filter_annotations_args())
def test_filter_annotations(target, kwargs):
filter_ann = FilterAnnotations(**kwargs)
bboxes = np.array([[2., 10., 4., 14.], [2., 10., 2.1, 10.1]])
raw_masks = np.zeros((2, 24, 24))
raw_masks[0, 10:14, 2:4] = 1
bitmap_masks = BitmapMasks(raw_masks, 24, 24)
results = dict(gt_bboxes=bboxes, gt_masks=bitmap_masks)
results = filter_ann(results)
if results is not None:
results = results['gt_bboxes'].shape[0]
assert results == target
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', channel_order='bgr', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(parallel=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
def test_grpc_custom_options():
f = Flow(grpc_server_options={'grpc.max_send_message_length': -1})
with f:
pass
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(parallel=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
|
import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
            # Fallback: Incomplete execs have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
**GRAPH_EXECUTION_INCLUDE_WITH_NODES["AgentNodeExecutions"], # type: ignore
"where": {
"AgentNode": {
"AgentBlock": {"id": {"in": IO_BLOCK_IDs}}, # type: ignore
},
"NOT": {
"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE,
},
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"Agent": {
"include": {
**AGENT_GRAPH_INCLUDE,
"AgentGraphExecution": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
|
import prisma
from backend.blocks.io import IO_BLOCK_IDs
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
            # Fallback: Incomplete execs have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
**GRAPH_EXECUTION_INCLUDE_WITH_NODES["AgentNodeExecutions"], # type: ignore
"where": {
"AgentNode": {
"AgentBlock": {"id": {"in": IO_BLOCK_IDs}}, # type: ignore
},
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"Agent": {
"include": {
**AGENT_GRAPH_INCLUDE,
"AgentGraphExecution": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
|
import types
from abc import ABC
from typing import Any, Callable, List, Optional, Tuple, TypeVar, Union
import numpy as np
from docarray.computation import AbstractComputationalBackend
T = TypeVar('T')
class AbstractNumpyBasedBackend(AbstractComputationalBackend[T], ABC):
"""
Abstract base class for computational backends that are based on numpy.
This includes numpy (np) itself and tensorflow.experimental.numpy (tnp).
The overlap of those two is gathered in this abstract backend. Other functions
should be defined in corresponding subclasses.
"""
_module: types.ModuleType
    # The method _get_tensor() transforms the input of the backend's methods to a
    # handleable type that the backend's _module can work with, whereas _cast_output()
    # casts the output of a method back to the original input type. This is especially
# relevant w.r.t. the TensorFlowTensor class:
# If a TensorFlowTensor instance is input to a function, we first want to transform
# it to a tf.Tensor, since the tf.Tensor is what the TensorFlowBackend's _module
# (tnp) works on. If the function returns a tf.Tensor, we want to cast it back to a
# TensorFlowTensor.
_cast_output: Callable
_get_tensor: Callable
@classmethod
def stack(cls, tensors: Union[List[T], Tuple[T]], dim: int = 0) -> T:
"""Stack a list of tensors along a new axis."""
t = [cls._get_tensor(t) for t in tensors]
return cls._cast_output(cls._module.stack(t, axis=dim))
@classmethod
def n_dim(cls, array: T) -> int:
"""Get the number of the array dimensions."""
return cls._module.ndim(cls._get_tensor(array))
@classmethod
def squeeze(cls, tensor: T) -> T:
"""
Returns a tensor with all the dimensions of tensor of size 1 removed.
"""
return cls._cast_output(cls._module.squeeze(cls._get_tensor(tensor)))
@classmethod
def empty(
cls,
shape: Tuple[int, ...],
dtype: Optional[Any] = None,
device: Optional[Any] = None,
) -> T:
if cls._module is np and device is not None:
raise NotImplementedError('Numpy does not support devices (GPU).')
return cls._cast_output(cls._module.empty(shape, dtype=dtype))
@classmethod
def shape(cls, array: T) -> Tuple[int, ...]:
"""Get shape of array"""
return tuple(cls._module.shape(cls._get_tensor(array)))
@classmethod
def reshape(cls, array: T, shape: Tuple[int, ...]) -> T:
"""
Gives a new shape to array without changing its data.
:param array: array to be reshaped
:param shape: the new shape
        :return: an array with the same data and number of elements as array
but with the specified shape.
"""
return cls._cast_output(cls._module.reshape(cls._get_tensor(array), shape))
@classmethod
def isnan(cls, tensor: T) -> T:
"""Check element-wise for nan and return result as a boolean array"""
return cls._cast_output(cls._module.isnan(cls._get_tensor(tensor)))
@classmethod
def copy(cls, tensor: 'T') -> 'T':
"""return a copy/clone of the tensor"""
return cls._cast_output(cls._module.array(cls._get_tensor(tensor), copy=True))
|
import types
from abc import ABC
from typing import Any, Callable, List, Optional, Tuple, TypeVar, Union
import numpy as np
from docarray.computation import AbstractComputationalBackend
T = TypeVar('T')
class AbstractNumpyBasedBackend(AbstractComputationalBackend[T], ABC):
"""
Abstract base class for computational backends that are based on numpy.
This includes numpy (np) itself and tensorflow.experimental.numpy (tnp).
The overlap of those two is gathered in this abstract backend. Other functions
should be defined in corresponding subclasses.
"""
_module: types.ModuleType
    # The method _get_tensor() transforms the input of the backend's methods to a
    # handleable type that the backend's _module can work with, whereas _cast_output()
    # casts the output of a method back to the original input type. This is especially
# relevant w.r.t. the TensorFlowTensor class:
# If a TensorFlowTensor instance is input to a function, we first want to transform
# it to a tf.Tensor, since the tf.Tensor is what the TensorFlowBackend's _module
# (tnp) works on. If the function returns a tf.Tensor, we want to cast it back to a
# TensorFlowTensor.
_cast_output: Callable
_get_tensor: Callable
@classmethod
def stack(cls, tensors: Union[List[T], Tuple[T]], dim: int = 0) -> T:
"""Stack a list of tensors along a new axis."""
t = [cls._get_tensor(t) for t in tensors]
return cls._cast_output(cls._module.stack(t, axis=dim))
@classmethod
def n_dim(cls, array: T) -> int:
"""Get the number of the array dimensions."""
return cls._module.ndim(cls._get_tensor(array))
@classmethod
def squeeze(cls, tensor: T) -> T:
"""
Returns a tensor with all the dimensions of tensor of size 1 removed.
"""
return cls._cast_output(cls._module.squeeze(cls._get_tensor(tensor)))
@classmethod
def empty(
cls,
shape: Tuple[int, ...],
dtype: Optional[Any] = None,
device: Optional[Any] = None,
) -> T:
if cls._module is np and device is not None:
raise NotImplementedError('Numpy does not support devices (GPU).')
return cls._cast_output(cls._module.empty(shape, dtype=dtype))
@classmethod
def shape(cls, array: T) -> Tuple[int, ...]:
"""Get shape of array"""
return tuple(cls._module.shape(cls._get_tensor(array)))
@classmethod
def reshape(cls, array: T, shape: Tuple[int, ...]) -> T:
"""
Gives a new shape to array without changing its data.
:param array: array to be reshaped
:param shape: the new shape
        :return: an array with the same data and number of elements as array
but with the specified shape.
"""
return cls._cast_output(cls._module.reshape(cls._get_tensor(array), shape))
@classmethod
def isnan(cls, tensor: T) -> T:
"""Check element-wise for nan and return result as a boolean array"""
return cls._cast_output(cls._module.isnan(cls._get_tensor(tensor)))
|
from workflows.handler import WorkflowHandler # noqa
|
import asyncio
from typing import Any, AsyncGenerator, List, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future[RunResultT]):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self._ctx = ctx
@property
def ctx(self) -> Optional[Context]:
return self._ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if self.ctx is None:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if isinstance(ev, StopEvent):
break
async def run_step(self) -> Optional[List[Event]]:
"""
Runs the next workflow step and returns the output Event.
If return is None, then the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx is None:
raise ValueError("Context must be set to run a workflow step-wise!")
if not self.ctx.stepwise:
raise ValueError(
"Workflow must be created passing stepwise=True to call this method."
)
try:
# Reset the events collected in current step
self.ctx._step_events_holding = None
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
await self.ctx.shutdown()
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx.get_holding_events()
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
|
import os # type: ignore[import-not-found]
from exa_py import Exa # type: ignore
from langchain_core.utils import convert_to_secret_str
def initialize_client(values: dict) -> dict:
"""Initialize the client."""
exa_api_key = values.get("exa_api_key") or os.environ.get("EXA_API_KEY") or ""
values["exa_api_key"] = convert_to_secret_str(exa_api_key)
args = {
"api_key": values["exa_api_key"].get_secret_value(),
}
if values.get("exa_base_url"):
args["base_url"] = values["exa_base_url"]
values["client"] = Exa(**args)
return values
|
import os # type: ignore[import-not-found]
from typing import Dict
from exa_py import Exa # type: ignore
from langchain_core.utils import convert_to_secret_str
def initialize_client(values: Dict) -> Dict:
"""Initialize the client."""
exa_api_key = values.get("exa_api_key") or os.environ.get("EXA_API_KEY") or ""
values["exa_api_key"] = convert_to_secret_str(exa_api_key)
args = {
"api_key": values["exa_api_key"].get_secret_value(),
}
if values.get("exa_base_url"):
args["base_url"] = values["exa_base_url"]
values["client"] = Exa(**args)
return values
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
decode_deltas_to_boxes,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
encode_box_to_deltas,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_ciou,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_iou,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
decode_deltas_to_boxes,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
encode_box_to_deltas,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_ciou,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_iou,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad,
)
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: Optional[int] = None
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
if (
self.config.columns is not None
and self.config.features is not None
and set(self.config.columns) != set(self.config.features)
):
            raise ValueError(
                "The columns and features argument must contain the same columns, but got "
                f"{self.config.columns} and {self.config.features}"
            )
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
self.info.features = datasets.Features(
{col: feat for col, feat in self.info.features.items() if col in self.config.columns}
)
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
if parquet_file.metadata.num_row_groups > 0:
batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
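# Usage sketch (illustrative): the packaged "parquet" builder that ships with `datasets`
# follows this implementation. The demo below writes a tiny Parquet file and loads it
# back, selecting a single column via the `columns` option of ParquetConfig.
if __name__ == "__main__":
    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), "toy.parquet")
    pq.write_table(pa.table({"id": [1, 2, 3], "text": ["a", "b", "c"]}), path)
    ds = datasets.load_dataset("parquet", data_files=path, columns=["id"], split="train")
    print(ds)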
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: int = 10_000
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
if (
self.config.columns is not None
and self.config.features is not None
and set(self.config.columns) != set(self.config.features)
):
            raise ValueError(
                "The columns and features argument must contain the same columns, but got "
                f"{self.config.columns} and {self.config.features}"
            )
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
self.info.features = datasets.Features(
{col: feat for col, feat in self.info.features.items() if col in self.config.columns}
)
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
|
from langchain_core.prompts.prompt import PromptTemplate
_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(
input_variables=["question"], template=_CREATE_DRAFT_ANSWER_TEMPLATE
)
_LIST_ASSERTIONS_TEMPLATE = """Here is a statement:
{statement}
Make a bullet point list of the assumptions you made when producing the above statement.\n\n""" # noqa: E501
LIST_ASSERTIONS_PROMPT = PromptTemplate(
input_variables=["statement"], template=_LIST_ASSERTIONS_TEMPLATE
)
_CHECK_ASSERTIONS_TEMPLATE = """Here is a bullet point list of assertions:
{assertions}
For each assertion, determine whether it is true or false. If it is false, explain why.\n\n""" # noqa: E501
CHECK_ASSERTIONS_PROMPT = PromptTemplate(
input_variables=["assertions"], template=_CHECK_ASSERTIONS_TEMPLATE
)
_REVISED_ANSWER_TEMPLATE = """{checked_assertions}
Question: In light of the above assertions and checks, how would you answer the question '{question}'?
Answer:""" # noqa: E501
REVISED_ANSWER_PROMPT = PromptTemplate(
input_variables=["checked_assertions", "question"],
template=_REVISED_ANSWER_TEMPLATE,
)
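# Usage sketch (illustrative, assumes `langchain-core` is installed): PromptTemplate
# instances are rendered with `.format()`, filling in the declared input variables.
if __name__ == "__main__":
    print(CREATE_DRAFT_ANSWER_PROMPT.format(question="What is the boiling point of water?"))
    print(
        REVISED_ANSWER_PROMPT.format(
            checked_assertions="- Water boils at 100 C at sea level: true",
            question="What is the boiling point of water?",
        )
    )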
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(
input_variables=["question"], template=_CREATE_DRAFT_ANSWER_TEMPLATE
)
_LIST_ASSERTIONS_TEMPLATE = """Here is a statement:
{statement}
Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
LIST_ASSERTIONS_PROMPT = PromptTemplate(
input_variables=["statement"], template=_LIST_ASSERTIONS_TEMPLATE
)
_CHECK_ASSERTIONS_TEMPLATE = """Here is a bullet point list of assertions:
{assertions}
For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
CHECK_ASSERTIONS_PROMPT = PromptTemplate(
input_variables=["assertions"], template=_CHECK_ASSERTIONS_TEMPLATE
)
_REVISED_ANSWER_TEMPLATE = """{checked_assertions}
Question: In light of the above assertions and checks, how would you answer the question '{question}'?
Answer:"""
REVISED_ANSWER_PROMPT = PromptTemplate(
input_variables=["checked_assertions", "question"],
template=_REVISED_ANSWER_TEMPLATE,
)
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.host = self.runtime_args.host
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray
from docarray.typing.tensor import NdArrayEmbedding
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 224)))
# test independent variable dimensions
tensor = parse_obj_as(NdArray[3, 'x', 'y'], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
tensor = parse_obj_as(NdArray[3, 'x', 'y'], np.zeros((3, 60, 128)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 60, 128)
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 'x', 'y'], np.zeros((4, 224, 224)))
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 'x', 'y'], np.zeros((100, 1)))
# test dependent variable dimensions
tensor = parse_obj_as(NdArray[3, 'x', 'x'], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
with pytest.raises(ValueError):
tensor = parse_obj_as(NdArray[3, 'x', 'x'], np.zeros((3, 60, 128)))
with pytest.raises(ValueError):
tensor = parse_obj_as(NdArray[3, 'x', 'x'], np.zeros((3, 60)))
def test_np_embedding():
# correct shape
tensor = parse_obj_as(NdArrayEmbedding[128], np.zeros((128,)))
assert isinstance(tensor, NdArrayEmbedding)
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128], np.zeros((256,)))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128, 128], np.zeros((128, 128)))
def test_parametrized_subclass():
c1 = NdArray[128]
c2 = NdArray[128]
assert issubclass(c1, c2)
assert issubclass(c1, NdArray)
assert issubclass(c1, np.ndarray)
assert not issubclass(c1, NdArray[256])
def test_parametrized_instance():
t = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(t, NdArray[128])
assert isinstance(t, NdArray)
assert isinstance(t, np.ndarray)
assert not isinstance(t, NdArray[256])
assert not isinstance(t, NdArray[2, 64])
assert not isinstance(t, NdArray[2, 2, 32])
def test_parametrized_equality():
t1 = parse_obj_as(NdArray[128], np.zeros(128))
t2 = parse_obj_as(NdArray[128], np.zeros(128))
t3 = parse_obj_as(NdArray[256], np.zeros(256))
assert (t1 == t2).all()
assert not t1 == t3
def test_parametrized_operations():
t1 = parse_obj_as(NdArray[128], np.zeros(128))
t2 = parse_obj_as(NdArray[128], np.zeros(128))
t_result = t1 + t2
assert isinstance(t_result, np.ndarray)
assert isinstance(t_result, NdArray)
assert isinstance(t_result, NdArray[128])
|
import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray
from docarray.typing.tensor import NdArrayEmbedding
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 224)))
# test independent variable dimensions
tensor = parse_obj_as(NdArray[3, 'x', 'y'], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
tensor = parse_obj_as(NdArray[3, 'x', 'y'], np.zeros((3, 60, 128)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 60, 128)
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 'x', 'y'], np.zeros((4, 224, 224)))
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 'x', 'y'], np.zeros((100, 1)))
# test dependent variable dimensions
tensor = parse_obj_as(NdArray[3, 'x', 'x'], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
with pytest.raises(ValueError):
tensor = parse_obj_as(NdArray[3, 'x', 'x'], np.zeros((3, 60, 128)))
with pytest.raises(ValueError):
tensor = parse_obj_as(NdArray[3, 'x', 'x'], np.zeros((3, 60)))
def test_np_embedding():
# correct shape
tensor = parse_obj_as(NdArrayEmbedding[128], np.zeros((128,)))
assert isinstance(tensor, NdArrayEmbedding)
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128], np.zeros((256,)))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128, 128], np.zeros((128, 128)))
def test_parametrized_subclass():
c1 = NdArray[128]
c2 = NdArray[128]
assert issubclass(c1, c2)
assert issubclass(c1, NdArray)
assert issubclass(c1, np.ndarray)
assert not issubclass(c1, NdArray[256])
def test_parametrized_instance():
t = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(t, NdArray[128])
assert isinstance(t, NdArray)
assert isinstance(t, np.ndarray)
assert not isinstance(t, NdArray[256])
def test_parametrized_equality():
t1 = parse_obj_as(NdArray[128], np.zeros(128))
t2 = parse_obj_as(NdArray[128], np.zeros(128))
t3 = parse_obj_as(NdArray[256], np.zeros(256))
assert (t1 == t2).all()
assert not t1 == t3
def test_parametrized_operations():
t1 = parse_obj_as(NdArray[128], np.zeros(128))
t2 = parse_obj_as(NdArray[128], np.zeros(128))
t_result = t1 + t2
assert isinstance(t_result, np.ndarray)
assert isinstance(t_result, NdArray)
assert isinstance(t_result, NdArray[128])
|
"""Defines utilities for switching audio backends"""
import os
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def _is_backend_dispatcher_enabled() -> bool:
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER") == "1"
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("list_audio_backend's return value is irrelevant when the I/O backend dispatcher is enabled.")
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if torchaudio._extension._SOX_INITIALIZED:
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("set_audio_backend is a no-op when the I/O backend dispatcher is enabled.")
return
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("get_audio_backend's return value is irrelevant when the I/O backend dispatcher is enabled.")
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
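# Usage sketch (illustrative): these helpers are exposed as `torchaudio.list_audio_backends`,
# `torchaudio.set_audio_backend` and `torchaudio.get_audio_backend`. A typical switch looks like:
#
#     import torchaudio
#     if "soundfile" in torchaudio.list_audio_backends():
#         torchaudio.set_audio_backend("soundfile")
#     print(torchaudio.get_audio_backend())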
|
"""Defines utilities for switching audio backends"""
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if torchaudio._extension._SOX_INITIALIZED:
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
|
_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './detr_r50_8x2_500e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize(self, value: Any) -> str: ...
@abstractmethod
def deserialize(self, value: str) -> Any: ...
class JsonSerializer(BaseSerializer):
def _serialize_value(self, value: Any) -> Any:
"""Helper to serialize a single value."""
if isinstance(value, BaseComponent):
return {
"__is_component": True,
"value": value.to_dict(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, BaseModel):
return {
"__is_pydantic": True,
"value": value.model_dump(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, dict):
return {k: self._serialize_value(v) for k, v in value.items()}
elif isinstance(value, list):
return [self._serialize_value(item) for item in value]
return value
def serialize(self, value: Any) -> str:
try:
serialized_value = self._serialize_value(value)
return json.dumps(serialized_value)
        except Exception as e:
            raise ValueError(f"Failed to serialize value: {type(value)}: {value!s}") from e
def _deserialize_value(self, data: Any) -> Any:
"""Helper to deserialize a single value."""
if isinstance(data, dict):
if data.get("__is_pydantic") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.model_validate(data["value"])
elif data.get("__is_component") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.from_dict(data["value"])
return {k: self._deserialize_value(v) for k, v in data.items()}
elif isinstance(data, list):
return [self._deserialize_value(item) for item in data]
return data
def deserialize(self, value: str) -> Any:
data = json.loads(value)
return self._deserialize_value(data)
class PickleSerializer(JsonSerializer):
def serialize(self, value: Any) -> str:
"""Serialize while prioritizing JSON, falling back to Pickle."""
try:
return super().serialize(value)
except Exception:
return base64.b64encode(pickle.dumps(value)).decode("utf-8")
def deserialize(self, value: str) -> Any:
"""
Deserialize while prioritizing Pickle, falling back to JSON.
To avoid malicious exploits of the deserialization, deserialize objects
only when you deem it safe to do so.
"""
try:
return pickle.loads(base64.b64decode(value))
except Exception:
return super().deserialize(value)
JsonPickleSerializer = PickleSerializer
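# Usage sketch (illustrative, assumes `llama_index` is installed):
#
#     serializer = JsonSerializer()
#     payload = serializer.serialize({"answer": 42, "tags": ["a", "b"]})
#     assert serializer.deserialize(payload) == {"answer": 42, "tags": ["a", "b"]}
#
# PickleSerializer tries the JSON path first and only falls back to a base64-encoded
# pickle for values that are not JSON-serializable; deserialize pickled payloads only
# from trusted sources.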
|
import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize(self, value: Any) -> str: ...
@abstractmethod
def deserialize(self, value: str) -> Any: ...
class JsonSerializer(BaseSerializer):
def _serialize_value(self, value: Any) -> Any:
"""Helper to serialize a single value."""
if isinstance(value, BaseComponent):
return {
"__is_component": True,
"value": value.to_dict(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, BaseModel):
return {
"__is_pydantic": True,
"value": value.model_dump(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, dict):
return {k: self._serialize_value(v) for k, v in value.items()}
elif isinstance(value, list):
return [self._serialize_value(item) for item in value]
return value
def serialize(self, value: Any) -> str:
try:
serialized_value = self._serialize_value(value)
return json.dumps(serialized_value)
        except Exception as e:
            raise ValueError(f"Failed to serialize value: {type(value)}: {value!s}") from e
def _deserialize_value(self, data: Any) -> Any:
"""Helper to deserialize a single value."""
if isinstance(data, dict):
if data.get("__is_pydantic") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.model_validate(data["value"])
elif data.get("__is_component") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.from_dict(data["value"])
return {k: self._deserialize_value(v) for k, v in data.items()}
elif isinstance(data, list):
return [self._deserialize_value(item) for item in data]
return data
def deserialize(self, value: str) -> Any:
data = json.loads(value)
return self._deserialize_value(data)
class JsonPickleSerializer(JsonSerializer):
def serialize(self, value: Any) -> str:
"""Serialize while prioritizing JSON, falling back to Pickle."""
try:
return super().serialize(value)
except Exception:
return base64.b64encode(pickle.dumps(value)).decode("utf-8")
def deserialize(self, value: str) -> Any:
"""Deserialize while prioritizing Pickle, falling back to JSON."""
try:
return pickle.loads(base64.b64decode(value))
except Exception:
return super().deserialize(value)
|
from langchain_core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.
The conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.
Return the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).
EXAMPLE
Conversation history:
Person #1: how's it going today?
AI: "It's going great! How about you?"
Person #1: good! busy working on Langchain. lots to do.
AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"
Last line:
Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.
Output: Langchain
END OF EXAMPLE
EXAMPLE
Conversation history:
Person #1: how's it going today?
AI: "It's going great! How about you?"
Person #1: good! busy working on Langchain. lots to do.
AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"
Last line:
Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Person #2.
Output: Langchain, Person #2
END OF EXAMPLE
Conversation history (for reference only):
{history}
Last line of conversation (for extraction):
Human: {input}
Output:""" # noqa: E501
ENTITY_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["history", "input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE
)
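# Usage sketch (illustrative, assumes `langchain-core` is installed): render the prompt
# by filling in the conversation history and the last user line.
if __name__ == "__main__":
    print(
        ENTITY_EXTRACTION_PROMPT.format(
            history="Person #1: hi\nAI: hello!",
            input="I met Alice at the Berlin office yesterday.",
        )
    )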
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.
The conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.
Return the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).
EXAMPLE
Conversation history:
Person #1: how's it going today?
AI: "It's going great! How about you?"
Person #1: good! busy working on Langchain. lots to do.
AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"
Last line:
Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.
Output: Langchain
END OF EXAMPLE
EXAMPLE
Conversation history:
Person #1: how's it going today?
AI: "It's going great! How about you?"
Person #1: good! busy working on Langchain. lots to do.
AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"
Last line:
Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Person #2.
Output: Langchain, Person #2
END OF EXAMPLE
Conversation history (for reference only):
{history}
Last line of conversation (for extraction):
Human: {input}
Output:"""
ENTITY_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["history", "input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE
)
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
try:
reader = pa.ipc.open_stream(f)
except pa.lib.ArrowInvalid:
reader = pa.ipc.open_file(f)
self.info.features = datasets.Features.from_arrow_schema(reader.schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
try:
batches = pa.ipc.open_stream(f)
except pa.lib.ArrowInvalid:
reader = pa.ipc.open_file(f)
batches = (reader.get_batch(i) for i in range(reader.num_record_batches))
for batch_idx, record_batch in enumerate(batches):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
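# Usage sketch (illustrative): the packaged "arrow" builder in `datasets` follows this
# implementation. The demo below writes a tiny Arrow IPC stream file and loads it back.
if __name__ == "__main__":
    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), "toy.arrow")
    table = pa.table({"id": [1, 2, 3], "text": ["a", "b", "c"]})
    with pa.OSFile(path, "wb") as sink, pa.ipc.new_stream(sink, table.schema) as writer:
        writer.write_table(table)
    ds = datasets.load_dataset("arrow", data_files=path, split="train")
    print(ds)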
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
|
"""
Hub is a central trustworthy entity that is aware of the existence of isolated apps and that can reliably receive user queries and route them to the appropriate apps.
"""
from typing import Optional, Sequence, Callable
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.callbacks import (
CallbackManager,
)
from llama_index.core.llms.llm import LLM
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.settings import Settings
from llama_index.core.tools import BaseTool, ToolOutput
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.llama_pack.base import BaseLlamaPack
from .planner import HubPlanner
from .tool_importer import ToolImporter
from .hub_operator import HubOperator
class SecGPTPack(BaseLlamaPack):
"""
SecGPT Hub.
A central trustworthy entity that routes user queries to appropriate isolated apps.
"""
def __init__(
self,
tools: Sequence[BaseTool],
tool_specs: Sequence[BaseToolSpec],
llm: LLM = None,
memory: BaseMemory = None,
output_parser: Optional[ReActOutputParser] = None,
verbose: bool = False,
handle_reasoning_failure_fn: Optional[
Callable[[CallbackManager, Exception], ToolOutput]
] = None,
user_id: Optional[str] = "0",
) -> None:
"""
Initialize the SecGPTPack.
Args:
tools (Sequence[BaseTool]): A sequence of available tools.
tool_specs (Sequence[BaseToolSpec]): Specifications for the tools.
llm (LLM, optional): Language Model used for processing. Defaults to Settings.llm.
memory (BaseMemory, optional): Memory component to keep track of conversation history. Defaults to ChatMemoryBuffer.
output_parser (Optional[ReActOutputParser], optional): Parser for handling the output. Defaults to None.
verbose (bool, optional): Flag to enable verbose output. Defaults to False.
handle_reasoning_failure_fn (Optional[Callable[[CallbackManager, Exception], ToolOutput]], optional): Callable function to handle reasoning failures. Defaults to None.
user_id (Optional[str], optional): User identifier. Defaults to "0".
"""
self.llm = llm or Settings.llm
self.memory = memory or ChatMemoryBuffer.from_defaults(
chat_history=[], llm=self.llm
)
self.output_parser = output_parser
self.verbose = verbose
self.handle_reasoning_failure_fn = handle_reasoning_failure_fn
self.user_id = user_id
self.planner = HubPlanner(self.llm)
self.tool_importer = ToolImporter(tools, tool_specs)
self.hub_operator = HubOperator(self.tool_importer, self.user_id)
def chat(
self,
query: str,
) -> str:
"""
Process a user query and generate a response.
Args:
query (str): The user query to process.
Returns:
str: The response generated by SecGPT.
"""
memory_content = self.memory.get()
self.memory.put(ChatMessage(role=MessageRole.USER, content=(query)))
tool_info = self.tool_importer.get_tool_info()
plan = self.planner.plan_generate(query, tool_info, memory_content)
response = self.hub_operator.run(query, plan)
self.memory.put(ChatMessage(role=MessageRole.CHATBOT, content=(response)))
return response
# Create an alias for the SecGPTPack class
Hub = SecGPTPack
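# Usage sketch (illustrative only): assumes `Settings.llm` has been configured and that
# `my_tools` / `my_tool_specs` are existing BaseTool / BaseToolSpec collections.
#
#     hub = Hub(tools=my_tools, tool_specs=my_tool_specs)
#     answer = hub.chat("Summarize my unread emails and draft replies.")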
|
"""
Hub is a central trustworthy entity that is aware of the existence of isolated apps and that can reliably receive user queries and route them to the appropriate apps.
"""
from typing import Optional, Sequence, Callable
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.callbacks import (
CallbackManager,
)
from llama_index.core.llms.llm import LLM
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.settings import Settings
from llama_index.core.tools import BaseTool, ToolOutput
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.llama_pack.base import BaseLlamaPack
from .planner import HubPlanner
from .tool_importer import ToolImporter
from .hub_operator import HubOperator
class SecGPTPack(BaseLlamaPack):
"""SecGPT Hub.
A central trustworthy entity that routes user queries to appropriate isolated apps.
"""
def __init__(
self,
tools: Sequence[BaseTool],
tool_specs: Sequence[BaseToolSpec],
llm: LLM = None,
memory: BaseMemory = None,
output_parser: Optional[ReActOutputParser] = None,
verbose: bool = False,
handle_reasoning_failure_fn: Optional[
Callable[[CallbackManager, Exception], ToolOutput]
] = None,
user_id: Optional[str] = "0",
) -> None:
"""Initialize the SecGPTPack.
Args:
tools (Sequence[BaseTool]): A sequence of available tools.
tool_specs (Sequence[BaseToolSpec]): Specifications for the tools.
llm (LLM, optional): Language Model used for processing. Defaults to Settings.llm.
memory (BaseMemory, optional): Memory component to keep track of conversation history. Defaults to ChatMemoryBuffer.
output_parser (Optional[ReActOutputParser], optional): Parser for handling the output. Defaults to None.
verbose (bool, optional): Flag to enable verbose output. Defaults to False.
handle_reasoning_failure_fn (Optional[Callable[[CallbackManager, Exception], ToolOutput]], optional): Callable function to handle reasoning failures. Defaults to None.
user_id (Optional[str], optional): User identifier. Defaults to "0".
"""
self.llm = llm or Settings.llm
self.memory = memory or ChatMemoryBuffer.from_defaults(
chat_history=[], llm=self.llm
)
self.output_parser = output_parser
self.verbose = verbose
self.handle_reasoning_failure_fn = handle_reasoning_failure_fn
self.user_id = user_id
self.planner = HubPlanner(self.llm)
self.tool_importer = ToolImporter(tools, tool_specs)
self.hub_operator = HubOperator(self.tool_importer, self.user_id)
def chat(
self,
query: str,
) -> str:
"""Process a user query and generate a response.
Args:
query (str): The user query to process.
Returns:
str: The response generated by SecGPT.
"""
memory_content = self.memory.get()
self.memory.put(ChatMessage(role=MessageRole.USER, content=(query)))
tool_info = self.tool_importer.get_tool_info()
plan = self.planner.plan_generate(query, tool_info, memory_content)
response = self.hub_operator.run(query, plan)
self.memory.put(ChatMessage(role=MessageRole.CHATBOT, content=(response)))
return response
# Create an alias for the SecGPTPack class
Hub = SecGPTPack
|
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
# Globally silence PyTorch sparse CSR tensor beta warning
warnings.filterwarnings("ignore", message="Sparse CSR tensor support is in beta state")
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
# Globally silence PyTorch sparse CSR tensor beta warning
warnings.filterwarnings("ignore", message="Sparse CSR tensor support is in beta state")
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer(model_name)
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="cnn", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-cnn")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-cnn')`."
)
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer("bert-base-uncased")
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
|
import numpy as np
import torch
from docarray import Document, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
NdArray,
Tensor,
TextUrl,
TorchTensor,
)
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
class MySUperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class MyDoc(Document):
img_url: ImageUrl
txt_url: TextUrl
any_url: AnyUrl
torch_tensor: TorchTensor
np_array: NdArray
generic_nd_array: Tensor
generic_torch_tensor: Tensor
embedding: Embedding
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
from docarray import DocumentArray, Document, Image, Text
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
class MySUperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
|
import logging
import warnings
from typing import Any, Optional, Dict, Type
from llama_index.core.bridge.pydantic import (
Field,
model_serializer,
ValidationError,
BaseModel,
)
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event, StartEvent
logger = logging.getLogger(__name__)
class PydanticConversionWarning(Warning):
"""Warning raised when the conversion from a dictionary to a Pydantic model fails"""
class AgentInput(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
class AgentSetup(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
class AgentStream(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection]
raw: Optional[Any] = Field(default=None, exclude=True)
class AgentOutput(Event):
"""LLM output."""
response: ChatMessage
tool_calls: list[ToolSelection]
raw: Optional[Any] = Field(default=None, exclude=True)
structured_response: Optional[Dict[str, Any]] = Field(default=None)
current_agent_name: str
def get_pydantic_model(self, model: Type[BaseModel]) -> Optional[BaseModel]:
if self.structured_response is None:
return self.structured_response
try:
return model.model_validate(self.structured_response)
except ValidationError as e:
warnings.warn(
f"Conversion of structured response to Pydantic model failed because:\n\n{e.title}\n\nPlease check the model you provided.",
PydanticConversionWarning,
)
return None
def __str__(self) -> str:
return self.response.content or ""
class ToolCall(Event):
"""All tool calls are surfaced."""
tool_name: str
tool_kwargs: dict
tool_id: str
class ToolCallResult(Event):
"""Tool call result."""
tool_name: str
tool_kwargs: dict
tool_id: str
tool_output: ToolOutput
return_direct: bool
class AgentWorkflowStartEvent(StartEvent):
def __init__(self, **data: Any) -> None:
"""Convert chat_history items to ChatMessage objects if they aren't already"""
if "chat_history" in data and data["chat_history"]:
converted_history = []
for i, msg in enumerate(data["chat_history"]):
if isinstance(msg, ChatMessage):
converted_history.append(msg)
else:
# Convert dict or other formats to ChatMessage with validation
try:
converted_history.append(ChatMessage.model_validate(msg))
except ValidationError as e:
logger.error(
f"Failed to validate chat message at index {i}: {e}. "
f"Invalid message: {msg}"
)
raise
data["chat_history"] = converted_history
super().__init__(**data)
@model_serializer()
def serialize_start_event(self) -> dict:
"""Serialize the start event and exclude the memory."""
return {
"user_msg": self.user_msg,
"chat_history": self.chat_history,
"max_iterations": self.max_iterations,
}
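# Illustrative usage sketch (not part of the module); `WeatherReport` and the field values are
# hypothetical, chosen only to show how the events defined above are meant to be used.
#
#   class WeatherReport(BaseModel):
#       city: str
#       temperature_c: float
#
#   output = AgentOutput(
#       response=ChatMessage(role="assistant", content="It is 21 C in Paris."),
#       tool_calls=[],
#       structured_response={"city": "Paris", "temperature_c": 21.0},
#       current_agent_name="weather_agent",
#   )
#   report = output.get_pydantic_model(WeatherReport)  # WeatherReport instance, or None plus a warning
#
#   # AgentWorkflowStartEvent accepts plain dicts in chat_history and validates them:
#   start = AgentWorkflowStartEvent(
#       user_msg="What is the weather in Paris?",
#       chat_history=[{"role": "user", "content": "Hi"}],
#       max_iterations=5,
#   )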
|
import logging
from typing import Any, Optional
from llama_index.core.bridge.pydantic import Field, model_serializer, ValidationError
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event, StartEvent
logger = logging.getLogger(__name__)
class AgentInput(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
class AgentSetup(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
class AgentStream(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection]
raw: Optional[Any] = Field(default=None, exclude=True)
class AgentOutput(Event):
"""LLM output."""
response: ChatMessage
tool_calls: list[ToolSelection]
raw: Optional[Any] = Field(default=None, exclude=True)
current_agent_name: str
def __str__(self) -> str:
return self.response.content or ""
class ToolCall(Event):
"""All tool calls are surfaced."""
tool_name: str
tool_kwargs: dict
tool_id: str
class ToolCallResult(Event):
"""Tool call result."""
tool_name: str
tool_kwargs: dict
tool_id: str
tool_output: ToolOutput
return_direct: bool
class AgentWorkflowStartEvent(StartEvent):
def __init__(self, **data: Any) -> None:
"""Convert chat_history items to ChatMessage objects if they aren't already"""
if "chat_history" in data and data["chat_history"]:
converted_history = []
for i, msg in enumerate(data["chat_history"]):
if isinstance(msg, ChatMessage):
converted_history.append(msg)
else:
# Convert dict or other formats to ChatMessage with validation
try:
converted_history.append(ChatMessage.model_validate(msg))
except ValidationError as e:
logger.error(
f"Failed to validate chat message at index {i}: {e}. "
f"Invalid message: {msg}"
)
raise
data["chat_history"] = converted_history
super().__init__(**data)
@model_serializer()
def serialize_start_event(self) -> dict:
"""Serialize the start event and exclude the memory."""
return {
"user_msg": self.user_msg,
"chat_history": self.chat_history,
"max_iterations": self.max_iterations,
}
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline)))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] yields higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline)))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] yields higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[320, 96, 32],
out_channels=[96, 96, 96]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[96, 96, 96],
out_channels=[96, 96, 96],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(
type='Resize',
img_scale=[(320, 320), (416, 416)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
type='RepeatDataset', # use RepeatDataset to speed up training
times=10,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=4000,
warmup_ratio=0.0001,
step=[24, 28])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=30)
evaluation = dict(interval=1, metric=['bbox'])
find_unused_parameters = True
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (24 samples per GPU)
auto_scale_lr = dict(base_batch_size=192)
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[320, 96, 32],
out_channels=[96, 96, 96]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[96, 96, 96],
out_channels=[96, 96, 96],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(
type='Resize',
img_scale=[(320, 320), (416, 416)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
type='RepeatDataset', # use RepeatDataset to speed up training
times=10,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=4000,
warmup_ratio=0.0001,
step=[24, 28])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=30)
evaluation = dict(interval=1, metric=['bbox'])
find_unused_parameters = True
|
"""Airtable reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from pyairtable import Table
class AirtableReader(BaseReader):
"""
Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable API key.
"""
def __init__(self, api_key: str) -> None:
"""Initialize Airtable reader."""
self.api_key = api_key
def load_data(self, base_id: str, table_id: str) -> List[Document]:
"""
Load data from a table in a base.
Args:
table_id (str): Table ID.
base_id (str): Base ID.
Returns:
List[Document]: List of documents.
"""
table = Table(self.api_key, base_id, table_id)
all_records = table.all()
return [Document(text=f"{all_records}", extra_info={})]
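# Illustrative usage sketch (not part of the original module). The API key, base id and table id
# below are placeholders, not real values.
#
#   reader = AirtableReader(api_key="YOUR_AIRTABLE_API_KEY")
#   documents = reader.load_data(base_id="appXXXXXXXXXXXXXX", table_id="tblXXXXXXXXXXXXXX")
#   print(documents[0].text)  # stringified list of all records in the table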
|
"""Airtable reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from pyairtable import Table
class AirtableReader(BaseReader):
"""Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable API key.
"""
def __init__(self, api_key: str) -> None:
"""Initialize Airtable reader."""
self.api_key = api_key
def load_data(self, base_id: str, table_id: str) -> List[Document]:
"""Load data from a table in a base.
Args:
table_id (str): Table ID.
base_id (str): Base ID.
Returns:
List[Document]: List of documents.
"""
table = Table(self.api_key, base_id, table_id)
all_records = table.all()
return [Document(text=f"{all_records}", extra_info={})]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN'
]
|
import inspect
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Identity,
Ones,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
RandomNormal,
TruncatedNormal,
RandomUniform,
VarianceScaling,
OrthogonalInitializer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# Aliases
ALL_OBJECTS_DICT.update(
{
"uniform": RandomUniform,
"normal": RandomNormal,
"orthogonal": OrthogonalInitializer,
"Orthogonal": OrthogonalInitializer, # Legacy
"one": Ones,
"zero": Zeros,
}
)
@keras_export("keras.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
The `identifier` may be the string name of an initializer function or class
(case-sensitive).
>>> identifier = 'Ones'
>>> keras.initializers.deserialize(identifier)
<...keras.initializers.initializers.Ones...>
You can also specify the `config` of the initializer to this function by passing
a dict containing `class_name` and `config` as an identifier. Also note that
the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras.initializers.deserialize(cfg)
<...keras.initializers.initializers.Ones...>
In the case that the `identifier` is a class, this method will return a new
instance of the class by its constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
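# Illustrative usage sketch (not part of the module): the identifier forms accepted by `get`,
# following the branches above. The config values for RandomNormal are example settings.
#
#   get("he_normal")                                   # snake_case string -> HeNormal()
#   get({"class_name": "RandomNormal",
#        "config": {"mean": 0.0, "stddev": 0.05}})     # dict -> deserialized instance
#   get(GlorotUniform)                                 # class -> instantiated via its constructor
#   get(None)                                          # -> None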
|
import inspect
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Identity,
Ones,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
RandomNormal,
TruncatedNormal,
RandomUniform,
VarianceScaling,
OrthogonalInitializer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# Aliases
ALL_OBJECTS_DICT.update(
{
"uniform": RandomUniform,
"normal": RandomNormal,
"orthogonal": OrthogonalInitializer,
"one": Ones,
"zero": Zeros,
}
)
@keras_export("keras.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
The `identifier` may be the string name of an initializer function or class
(case-sensitive).
>>> identifier = 'Ones'
>>> keras.initializers.deserialize(identifier)
<...keras.initializers.initializers.Ones...>
You can also specify the `config` of the initializer to this function by passing
a dict containing `class_name` and `config` as an identifier. Also note that
the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras.initializers.deserialize(cfg)
<...keras.initializers.initializers.Ones...>
In the case that the `identifier` is a class, this method will return a new
instance of the class by its constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
|
"""
Slides parser.
Contains parsers for .pptx files.
"""
import io
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.utils import infer_torch_device
class PptxReader(BaseReader):
"""
Powerpoint parser.
Extract text, caption images, and specify slides.
"""
def __init__(self) -> None:
"""Init parser."""
try:
import torch # noqa
from PIL import Image # noqa
from pptx import Presentation # noqa
from transformers import (
AutoTokenizer,
VisionEncoderDecoderModel,
ViTFeatureExtractor,
)
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the PptxReader: "
"`pip install torch 'transformers<4.50' python-pptx Pillow`"
)
model = VisionEncoderDecoderModel.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
feature_extractor = ViTFeatureExtractor.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
tokenizer = AutoTokenizer.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
self.parser_config = {
"feature_extractor": feature_extractor,
"model": model,
"tokenizer": tokenizer,
}
def caption_image(self, tmp_image_file: str) -> str:
"""Generate text caption of image."""
from PIL import Image
model = self.parser_config["model"]
feature_extractor = self.parser_config["feature_extractor"]
tokenizer = self.parser_config["tokenizer"]
device = infer_torch_device()
model.to(device)
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
i_image = Image.open(tmp_image_file)
if i_image.mode != "RGB":
i_image = i_image.convert(mode="RGB")
pixel_values = feature_extractor(
images=[i_image], return_tensors="pt"
).pixel_values
pixel_values = pixel_values.to(device)
output_ids = model.generate(pixel_values, **gen_kwargs)
preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
return preds[0].strip()
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
from pptx import Presentation
if fs:
with fs.open(str(file)) as f:
presentation = Presentation(io.BytesIO(f.read()))
else:
presentation = Presentation(file)
result = ""
for i, slide in enumerate(presentation.slides):
result += f"\n\nSlide #{i}: \n"
for shape in slide.shapes:
if hasattr(shape, "image"):
image = shape.image
# get image "file" contents
image_bytes = image.blob
# temporarily save the image to feed into model
f = tempfile.NamedTemporaryFile("wb", delete=False)
try:
f.write(image_bytes)
f.close()
result += f"\n Image: {self.caption_image(f.name)}\n\n"
finally:
os.unlink(f.name)
if hasattr(shape, "text"):
result += f"{shape.text}\n"
return [Document(text=result, metadata=extra_info or {})]
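# Illustrative usage sketch (not part of the original module); the file path is hypothetical.
#
#   reader = PptxReader()  # downloads the vit-gpt2 captioning model on first use
#   documents = reader.load_data(file=Path("slides/quarterly_review.pptx"))
#   print(documents[0].text)  # per-slide text plus generated image captions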
|
"""
Slides parser.
Contains parsers for .pptx files.
"""
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.utils import infer_torch_device
class PptxReader(BaseReader):
"""
Powerpoint parser.
Extract text, caption images, and specify slides.
"""
def __init__(self) -> None:
"""Init parser."""
try:
import torch # noqa
from PIL import Image # noqa
from pptx import Presentation # noqa
from transformers import (
AutoTokenizer,
VisionEncoderDecoderModel,
ViTFeatureExtractor,
)
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the PptxReader: "
"`pip install torch transformers python-pptx Pillow`"
)
model = VisionEncoderDecoderModel.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
feature_extractor = ViTFeatureExtractor.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
tokenizer = AutoTokenizer.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
self.parser_config = {
"feature_extractor": feature_extractor,
"model": model,
"tokenizer": tokenizer,
}
def caption_image(self, tmp_image_file: str) -> str:
"""Generate text caption of image."""
from PIL import Image
model = self.parser_config["model"]
feature_extractor = self.parser_config["feature_extractor"]
tokenizer = self.parser_config["tokenizer"]
device = infer_torch_device()
model.to(device)
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
i_image = Image.open(tmp_image_file)
if i_image.mode != "RGB":
i_image = i_image.convert(mode="RGB")
pixel_values = feature_extractor(
images=[i_image], return_tensors="pt"
).pixel_values
pixel_values = pixel_values.to(device)
output_ids = model.generate(pixel_values, **gen_kwargs)
preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
return preds[0].strip()
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
from pptx import Presentation
if fs:
with fs.open(file) as f:
presentation = Presentation(f)
else:
presentation = Presentation(file)
result = ""
for i, slide in enumerate(presentation.slides):
result += f"\n\nSlide #{i}: \n"
for shape in slide.shapes:
if hasattr(shape, "image"):
image = shape.image
# get image "file" contents
image_bytes = image.blob
# temporarily save the image to feed into model
f = tempfile.NamedTemporaryFile("wb", delete=False)
try:
f.write(image_bytes)
f.close()
result += f"\n Image: {self.caption_image(f.name)}\n\n"
finally:
os.unlink(f.name)
if hasattr(shape, "text"):
result += f"{shape.text}\n"
return [Document(text=result, metadata=extra_info or {})]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.documents.audio import AudioDoc
from docarray.documents.image import ImageDoc
from docarray.documents.mesh import Mesh3D, VerticesAndFaces
from docarray.documents.point_cloud import PointCloud3D, PointsAndColors
from docarray.documents.text import TextDoc
from docarray.documents.video import VideoDoc
__all__ = [
'TextDoc',
'ImageDoc',
'AudioDoc',
'Mesh3D',
'VerticesAndFaces',
'PointCloud3D',
'PointsAndColors',
'VideoDoc',
]
|
from docarray.documents.audio import AudioDoc
from docarray.documents.image import ImageDoc
from docarray.documents.mesh import Mesh3D, VerticesAndFaces
from docarray.documents.point_cloud import PointCloud3D, PointsAndColors
from docarray.documents.text import TextDoc
from docarray.documents.video import VideoDoc
__all__ = [
'TextDoc',
'ImageDoc',
'AudioDoc',
'Mesh3D',
'VerticesAndFaces',
'PointCloud3D',
'PointsAndColors',
'VideoDoc',
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of AutoAssign. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of AutoAssign. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
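# Illustrative config sketch (not part of the original module). The component names below
# ('ResNet', 'FPN', 'AutoAssignHead') are assumed to be registered in MMDetection's MODELS
# registry; this is a hedged sketch, not the repository's official AutoAssign config.
#
#   autoassign_cfg = dict(
#       type='AutoAssign',
#       backbone=dict(type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3)),
#       neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256,
#                 start_level=1, add_extra_convs='on_output', num_outs=5),
#       bbox_head=dict(type='AutoAssignHead', num_classes=80, in_channels=256,
#                      feat_channels=256, stacked_convs=4, strides=[8, 16, 32, 64, 128]))
#   detector = MODELS.build(autoassign_cfg)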
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of types used for type annotation."""
from tensorflow.python.framework import dtypes as _dtypes
class DTypeAnnotation:
pass
def _create_dtype_wrapper(name, underlying_dtype: _dtypes.DType):
return type(name, (DTypeAnnotation,), {"underlying_dtype": underlying_dtype})
BFloat16 = _create_dtype_wrapper("BFloat16", _dtypes.bfloat16)
Bool = _create_dtype_wrapper("Bool", _dtypes.bool)
Complex128 = _create_dtype_wrapper("Complex128", _dtypes.complex128)
Complex64 = _create_dtype_wrapper("Complex64", _dtypes.complex64)
Float8e4m3fn = _create_dtype_wrapper("Float8e4m3fn", _dtypes.float8_e4m3fn)
Float8e5m2 = _create_dtype_wrapper("Float8e5m2", _dtypes.float8_e5m2)
Float8e4m3fnuz = _create_dtype_wrapper(
"Float8e4m3fnuz", _dtypes.float8_e4m3fnuz
)
Float8e4m3b11fnuz = _create_dtype_wrapper(
"Float8e4m3b11fnuz", _dtypes.float8_e4m3b11fnuz
)
Float8e5m2fnuz = _create_dtype_wrapper(
"Float8e5m2fnuz", _dtypes.float8_e5m2fnuz
)
Float16 = _create_dtype_wrapper("Float16", _dtypes.float16)
Float32 = _create_dtype_wrapper("Float32", _dtypes.float32)
Float64 = _create_dtype_wrapper("Float64", _dtypes.float64)
Half = _create_dtype_wrapper("Half", _dtypes.float16)
Int2 = _create_dtype_wrapper("Int2", _dtypes.int2)
Int4 = _create_dtype_wrapper("Int4", _dtypes.int4)
Int8 = _create_dtype_wrapper("Int8", _dtypes.int8)
Int16 = _create_dtype_wrapper("Int16", _dtypes.int16)
Int32 = _create_dtype_wrapper("Int32", _dtypes.int32)
Int64 = _create_dtype_wrapper("Int64", _dtypes.int64)
UInt2 = _create_dtype_wrapper("UInt2", _dtypes.uint2)
UInt4 = _create_dtype_wrapper("UInt4", _dtypes.uint4)
UInt8 = _create_dtype_wrapper("UInt8", _dtypes.uint8)
UInt16 = _create_dtype_wrapper("UInt16", _dtypes.uint16)
UInt32 = _create_dtype_wrapper("UInt32", _dtypes.uint32)
UInt64 = _create_dtype_wrapper("UInt64", _dtypes.uint64)
QInt8 = _create_dtype_wrapper("QInt8", _dtypes.qint8)
QInt16 = _create_dtype_wrapper("QInt16", _dtypes.qint16)
QInt32 = _create_dtype_wrapper("QInt32", _dtypes.qint32)
QUInt16 = _create_dtype_wrapper("QUInt16", _dtypes.quint16)
QUInt8 = _create_dtype_wrapper("QUInt8", _dtypes.quint8)
Resource = _create_dtype_wrapper("Resource", _dtypes.resource)
String = _create_dtype_wrapper("String", _dtypes.string)
Variant = _create_dtype_wrapper("Variant", _dtypes.variant)
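# Illustrative check (not part of the original module): each wrapper is a subclass of
# DTypeAnnotation that records its TensorFlow dtype on the `underlying_dtype` attribute.
#
#   assert issubclass(Float32, DTypeAnnotation)
#   assert Float32.underlying_dtype == _dtypes.float32
#   assert Half.underlying_dtype == Float16.underlying_dtype  # both wrap float16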
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of types used for type annotation."""
from tensorflow.python.framework import dtypes as _dtypes
class DTypeAnnotation:
pass
def _create_dtype_wrapper(name, underlying_dtype: _dtypes.DType):
return type(name, (DTypeAnnotation,), {"underlying_dtype": underlying_dtype})
BFloat16 = _create_dtype_wrapper("BFloat16", _dtypes.bfloat16)
Bool = _create_dtype_wrapper("Bool", _dtypes.bool)
Complex128 = _create_dtype_wrapper("Complex128", _dtypes.complex128)
Complex64 = _create_dtype_wrapper("Complex64", _dtypes.complex64)
Float8e4m3fn = _create_dtype_wrapper("Float8e4m3fn", _dtypes.float8_e4m3fn)
Float8e5m2 = _create_dtype_wrapper("Float8e5m2", _dtypes.float8_e5m2)
Float8e4m3fnuz = _create_dtype_wrapper(
"Float8e4m3fnuz", _dtypes.float8_e4m3fnuz
)
Float8e4m3b11fnuz = _create_dtype_wrapper(
"Float8e4m3b11fnuz", _dtypes.float8_e4m3b11fnuz
)
Float8e5m2fnuz = _create_dtype_wrapper(
"Float8e5m2fnuz", _dtypes.float8_e5m2fnuz
)
Float16 = _create_dtype_wrapper("Float16", _dtypes.float16)
Float32 = _create_dtype_wrapper("Float32", _dtypes.float32)
Float64 = _create_dtype_wrapper("Float64", _dtypes.float64)
Half = _create_dtype_wrapper("Half", _dtypes.float16)
Int4 = _create_dtype_wrapper("Int4", _dtypes.int4)
Int8 = _create_dtype_wrapper("Int8", _dtypes.int8)
Int16 = _create_dtype_wrapper("Int16", _dtypes.int16)
Int32 = _create_dtype_wrapper("Int32", _dtypes.int32)
Int64 = _create_dtype_wrapper("Int64", _dtypes.int64)
UInt4 = _create_dtype_wrapper("UInt4", _dtypes.uint4)
UInt8 = _create_dtype_wrapper("UInt8", _dtypes.uint8)
UInt16 = _create_dtype_wrapper("UInt16", _dtypes.uint16)
UInt32 = _create_dtype_wrapper("UInt32", _dtypes.uint32)
UInt64 = _create_dtype_wrapper("UInt64", _dtypes.uint64)
QInt8 = _create_dtype_wrapper("QInt8", _dtypes.qint8)
QInt16 = _create_dtype_wrapper("QInt16", _dtypes.qint16)
QInt32 = _create_dtype_wrapper("QInt32", _dtypes.qint32)
QUInt16 = _create_dtype_wrapper("QUInt16", _dtypes.quint16)
QUInt8 = _create_dtype_wrapper("QUInt8", _dtypes.quint8)
Resource = _create_dtype_wrapper("Resource", _dtypes.resource)
String = _create_dtype_wrapper("String", _dtypes.string)
Variant = _create_dtype_wrapper("Variant", _dtypes.variant)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseAnglELoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAngleLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseAnglELoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.pubmed import PubMedAPIWrapper
class PubmedQueryRun(BaseTool):
"""Tool that searches the PubMed API."""
name: str = "pub_med"
description: str = (
"A wrapper around PubMed. "
"Useful for when you need to answer questions about medicine, health, "
"and biomedical topics "
"from biomedical literature, MEDLINE, life science journals, and online books. "
"Input should be a search query."
)
api_wrapper: PubMedAPIWrapper = Field(default_factory=PubMedAPIWrapper) # type: ignore[arg-type]
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the PubMed tool."""
return self.api_wrapper.run(query)
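# Illustrative usage sketch (not part of the original module). Running it requires network
# access and the PubMed wrapper's optional dependencies; the query string is arbitrary.
#
#   tool = PubmedQueryRun()
#   print(tool.invoke("treatments for migraine"))  # formatted summary of matching PubMed articles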
|
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.pubmed import PubMedAPIWrapper
class PubmedQueryRun(BaseTool): # type: ignore[override]
"""Tool that searches the PubMed API."""
name: str = "pub_med"
description: str = (
"A wrapper around PubMed. "
"Useful for when you need to answer questions about medicine, health, "
"and biomedical topics "
"from biomedical literature, MEDLINE, life science journals, and online books. "
"Input should be a search query."
)
api_wrapper: PubMedAPIWrapper = Field(default_factory=PubMedAPIWrapper) # type: ignore[arg-type]
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the PubMed tool."""
return self.api_wrapper.run(query)
|
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR.
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
                f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
            remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
                f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDocument):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
            remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
"""
=============================
Recursive feature elimination
=============================
This example demonstrates how Recursive Feature Elimination
(:class:`~sklearn.feature_selection.RFE`) can be used to determine the
importance of individual pixels for classifying handwritten digits.
:class:`~sklearn.feature_selection.RFE` recursively removes the least
significant features, assigning ranks based on their importance, where higher
`ranking_` values denote lower importance. The ranking is visualized using both
shades of blue and pixel annotations for clarity. As expected, pixels positioned
at the center of the image tend to be more predictive than those near the edges.
.. note::
See also :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Load the digits dataset
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target
pipe = Pipeline(
[
("scaler", MinMaxScaler()),
("rfe", RFE(estimator=LogisticRegression(), n_features_to_select=1, step=1)),
]
)
pipe.fit(X, y)
ranking = pipe.named_steps["rfe"].ranking_.reshape(digits.images[0].shape)
# Plot pixel ranking
plt.matshow(ranking, cmap=plt.cm.Blues)
# Add annotations for pixel numbers
for i in range(ranking.shape[0]):
for j in range(ranking.shape[1]):
plt.text(j, i, str(ranking[i, j]), ha="center", va="center", color="black")
plt.colorbar()
plt.title("Ranking of pixels with RFE\n(Logistic Regression)")
plt.show()
|
"""
=============================
Recursive feature elimination
=============================
This example demonstrates how Recursive Feature Elimination
(:class:`~sklearn.feature_selection.RFE`) can be used to determine the
importance of individual pixels for classifying handwritten digits.
:class:`~sklearn.feature_selection.RFE` recursively removes the least
significant features, assigning ranks based on their importance, where higher
`ranking_` values denote lower importance. The ranking is visualized using both
shades of blue and pixel annotations for clarity. As expected, pixels positioned
at the center of the image tend to be more predictive than those near the edges.
.. note::
See also :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`
""" # noqa: E501
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Load the digits dataset
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target
pipe = Pipeline(
[
("scaler", MinMaxScaler()),
("rfe", RFE(estimator=LogisticRegression(), n_features_to_select=1, step=1)),
]
)
pipe.fit(X, y)
ranking = pipe.named_steps["rfe"].ranking_.reshape(digits.images[0].shape)
# Plot pixel ranking
plt.matshow(ranking, cmap=plt.cm.Blues)
# Add annotations for pixel numbers
for i in range(ranking.shape[0]):
for j in range(ranking.shape[1]):
plt.text(j, i, str(ranking[i, j]), ha="center", va="center", color="black")
plt.colorbar()
plt.title("Ranking of pixels with RFE\n(Logistic Regression)")
plt.show()
|
"""
Remote file reader.
A loader that fetches an arbitrary remote page or file by URL and parses its contents.
"""
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from llama_index.core import SimpleDirectoryReader
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
class RemoteReader(BaseReader):
"""General reader for any remote page or file."""
def __init__(
self,
*args: Any,
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self.file_extractor = file_extractor
@staticmethod
def _is_youtube_video(url: str) -> bool:
# TODO create more global method for detecting all types
"""
Returns True if the given URL is a video on YouTube, False otherwise.
"""
# Regular expression pattern to match YouTube video URLs
youtube_pattern = r"(?:https?:\/\/)?(?:www\.)?(?:youtube\.com|youtu\.be)\/(?:watch\?v=)?([^\s&]+)"
# Match the pattern against the URL
match = re.match(youtube_pattern, url)
# If there's a match, it's a YouTube video URL
return match is not None
def load_data(self, url: str) -> List[Document]:
"""Parse whatever is at the URL."""
import tempfile
from urllib.parse import urlparse
from urllib.request import Request, urlopen
# check the URL
parsed_url = urlparse(url)
        # Allow only a limited set of supported URL schemes
if parsed_url.scheme not in (
"http",
"https",
"ftp",
"ws",
"wss",
"sftp",
"ftps",
"s3",
):
raise ValueError(
"Invalid URL scheme. Only http, https, ftp, ftps, sftp, ws, wss, and s3 are allowed."
)
extra_info = {"Source": url}
req = Request(url, headers={"User-Agent": "Magic Browser"})
result = urlopen(req)
url_type = result.info().get_content_type()
documents = []
if url_type == "text/html" or url_type == "text/plain":
text = "\n\n".join([str(el.decode("utf-8-sig")) for el in result])
documents = [Document(text=text, extra_info=extra_info)]
elif self._is_youtube_video(url):
youtube_reader = YoutubeTranscriptReader()
# TODO should we have another language, like english / french?
documents = youtube_reader.load_data([url])
else:
suffix = Path(urlparse(url).path).suffix
with tempfile.TemporaryDirectory() as temp_dir:
filepath = f"{temp_dir}/temp{suffix}"
with open(filepath, "wb") as output:
output.write(result.read())
loader = SimpleDirectoryReader(
temp_dir,
file_metadata=(lambda _: extra_info),
file_extractor=self.file_extractor,
)
documents = loader.load_data()
return documents
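# --- Usage sketch (illustrative; not part of the original module) ---------------
# Any scheme accepted by the check in `load_data` (http, https, ftp, ftps, sftp,
# ws, wss, s3) can be passed. HTML/plain-text responses are parsed directly;
# other content types are written to a temp file and handed to
# SimpleDirectoryReader.
if __name__ == "__main__":
    reader = RemoteReader()
    docs = reader.load_data("https://example.com/index.html")
    print(len(docs), docs[0].text[:200] if docs else "")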
|
"""Remote file reader.
A loader that fetches an arbitrary remote page or file by URL and parses its contents.
"""
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from llama_index.core import SimpleDirectoryReader
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
class RemoteReader(BaseReader):
"""General reader for any remote page or file."""
def __init__(
self,
*args: Any,
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self.file_extractor = file_extractor
@staticmethod
def _is_youtube_video(url: str) -> bool:
# TODO create more global method for detecting all types
"""
Returns True if the given URL is a video on YouTube, False otherwise.
"""
# Regular expression pattern to match YouTube video URLs
youtube_pattern = r"(?:https?:\/\/)?(?:www\.)?(?:youtube\.com|youtu\.be)\/(?:watch\?v=)?([^\s&]+)"
# Match the pattern against the URL
match = re.match(youtube_pattern, url)
# If there's a match, it's a YouTube video URL
if match:
return True
# Otherwise, it's not a YouTube video URL
return False
def load_data(self, url: str) -> List[Document]:
"""Parse whatever is at the URL."""
import tempfile
from urllib.parse import urlparse
from urllib.request import Request, urlopen
# check the URL
parsed_url = urlparse(url)
        # Allow only a limited set of supported URL schemes
if parsed_url.scheme not in (
"http",
"https",
"ftp",
"ws",
"wss",
"sftp",
"ftps",
"s3",
):
raise ValueError(
"Invalid URL scheme. Only http, https, ftp, ftps, sftp, ws, wss, and s3 are allowed."
)
extra_info = {"Source": url}
req = Request(url, headers={"User-Agent": "Magic Browser"})
result = urlopen(req)
url_type = result.info().get_content_type()
documents = []
if url_type == "text/html" or url_type == "text/plain":
text = "\n\n".join([str(el.decode("utf-8-sig")) for el in result])
documents = [Document(text=text, extra_info=extra_info)]
elif self._is_youtube_video(url):
youtube_reader = YoutubeTranscriptReader()
# TODO should we have another language, like english / french?
documents = youtube_reader.load_data([url])
else:
suffix = Path(urlparse(url).path).suffix
with tempfile.TemporaryDirectory() as temp_dir:
filepath = f"{temp_dir}/temp{suffix}"
with open(filepath, "wb") as output:
output.write(result.read())
loader = SimpleDirectoryReader(
temp_dir,
file_metadata=(lambda _: extra_info),
file_extractor=self.file_extractor,
)
documents = loader.load_data()
return documents
|
import pytest
from docarray import DocumentArray, Document
@pytest.mark.parametrize(
'columns',
[
[
('is_true', 'bool'),
('test_long', 'long'),
('test_double', 'double'),
],
{'is_true': 'bool', 'test_long': 'long', 'test_double': 'double'},
],
)
def test_data_type(start_storage, columns):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': columns,
'distance': 'l2_norm',
'index_name': 'test_data_type',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(
id=1,
test_bool=True,
test_long=372_036_854_775_807,
test_double=1_000_000_000_000_000,
)
]
)
assert elastic_doc[0].tags['test_bool'] is True
assert elastic_doc[0].tags['test_long'] == 372_036_854_775_807
assert elastic_doc[0].tags['test_double'] == 1_000_000_000_000_000
|
from docarray import DocumentArray, Document
def test_data_type(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [
('is_true', 'bool'),
('test_long', 'long'),
('test_double', 'double'),
],
'distance': 'l2_norm',
'index_name': 'test_data_type',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(
id=1,
test_bool=True,
test_long=372_036_854_775_807,
test_double=1_000_000_000_000_000,
)
]
)
assert elastic_doc[0].tags['test_bool'] is True
assert elastic_doc[0].tags['test_long'] == 372_036_854_775_807
assert elastic_doc[0].tags['test_double'] == 1_000_000_000_000_000
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.11.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.11.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
from typing import Any, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
class FakeChatOpenAI(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
def _generate(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
}
},
)
)
]
)
def test_openai_functions_router(
snapshot: SnapshotAssertion, mocker: MockerFixture
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f"Revised draft: no more {kw['notes']}!"
)
accept = mocker.Mock(side_effect=lambda kw: f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
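# How the assertions above hold: OpenAIFunctionsRouter dispatches on the name in
# the model's `function_call` payload. FakeChatOpenAI always emits "accept" with
# arguments {"draft": "turtles"}, so `accept` runs exactly once and `revise` never.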
|
from typing import Any, List, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
class FakeChatOpenAI(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
}
},
)
)
]
)
def test_openai_functions_router(
snapshot: SnapshotAssertion, mocker: MockerFixture
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f"Revised draft: no more {kw['notes']}!"
)
accept = mocker.Mock(side_effect=lambda kw: f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import STFTInitializer
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
from uuid import UUID
import pytest
from blockbuster import BlockBuster, blockbuster_ctx
from pytest_mock import MockerFixture
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[BlockBuster]:
with blockbuster_ctx("langchain_core") as bb:
for func in ["os.stat", "os.path.abspath"]:
(
bb.functions[func]
.can_block_in("langchain_core/_api/internal.py", "is_caller_internal")
.can_block_in("langchain_core/runnables/base.py", "__repr__")
.can_block_in(
"langchain_core/beta/runnables/context.py", "aconfig_with_context"
)
)
for func in ["os.stat", "io.TextIOWrapper.read"]:
bb.functions[func].can_block_in(
"langsmith/client.py", "_default_retry_config"
)
for bb_function in bb.functions.values():
bb_function.can_block_in(
"freezegun/api.py", "_get_cached_module_attributes"
)
yield bb
def pytest_addoption(parser: pytest.Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(
config: pytest.Config, items: Sequence[pytest.Function]
) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
@pytest.fixture
def deterministic_uuids(mocker: MockerFixture) -> MockerFixture:
side_effect = (
UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
)
return mocker.patch("uuid.uuid4", side_effect=side_effect)
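# Usage sketch (illustrative): the custom options above are exercised from the
# command line, e.g.
#   pytest --only-core      # skip every test marked @pytest.mark.requires(...)
#   pytest --only-extended  # fail (rather than skip) when a required package is
#                           # missing, and skip all non-extended tests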
|
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
from uuid import UUID
import pytest
from blockbuster import BlockBuster, blockbuster_ctx
from pytest import Config, Function, Parser
from pytest_mock import MockerFixture
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[BlockBuster]:
with blockbuster_ctx("langchain_core") as bb:
for func in ["os.stat", "os.path.abspath"]:
(
bb.functions[func]
.can_block_in("langchain_core/_api/internal.py", "is_caller_internal")
.can_block_in("langchain_core/runnables/base.py", "__repr__")
.can_block_in(
"langchain_core/beta/runnables/context.py", "aconfig_with_context"
)
)
for func in ["os.stat", "io.TextIOWrapper.read"]:
bb.functions[func].can_block_in(
"langsmith/client.py", "_default_retry_config"
)
for bb_function in bb.functions.values():
bb_function.can_block_in(
"freezegun/api.py", "_get_cached_module_attributes"
)
yield bb
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
@pytest.fixture()
def deterministic_uuids(mocker: MockerFixture) -> MockerFixture:
side_effect = (
UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
)
return mocker.patch("uuid.uuid4", side_effect=side_effect)
|
import os
import pytest
from typing import List
from unittest.mock import MagicMock, patch, AsyncMock
import uuid
from llama_index.core.base.base_selector import (
SelectorResult,
SingleSelection,
)
from llama_index.core.schema import QueryBundle
from llama_index.core.tools import ToolMetadata
from llama_index.selectors.notdiamond.base import NotDiamondSelector, LLMSingleSelector
from notdiamond import LLMConfig
@pytest.fixture()
def session_id() -> str:
return str(uuid.uuid4())
@pytest.fixture()
def choices() -> List[ToolMetadata]:
return [
ToolMetadata(
name="vector_index", description="Great for asking questions about recipes."
),
ToolMetadata(name="list_index", description="Great for summarizing recipes."),
]
@pytest.fixture()
def nd_selector(session_id):
from notdiamond import NotDiamond
os.environ["OPENAI_API_KEY"] = "test"
os.environ["ANTHROPIC_API_KEY"] = "test"
llm_configs = [
LLMConfig(provider="openai", model="gpt-4o"),
]
# mocking out model_select calls on client
_client = MagicMock(stub=NotDiamond, api_key="test", llm_configs=llm_configs)
_client.model_select.return_value = (session_id, llm_configs[0])
async def aselect(*args, **kwargs):
return (session_id, llm_configs[0])
_client.amodel_select = aselect
selector = NotDiamondSelector(client=_client)
# monkeypatch the _select and _aselect methods on parent class of NDSelector
LLMSingleSelector._select = MagicMock(
return_value=SelectorResult(
selections=[SingleSelection(index=0, reason="test")]
)
)
LLMSingleSelector._aselect = AsyncMock(
return_value=SelectorResult(
selections=[SingleSelection(index=1, reason="test")]
)
)
return selector
class TestNotDiamondSelector:
@patch("llama_index.llms.openai.OpenAI")
def test_select(self, openai_mock, nd_selector, choices, session_id):
"""_select should call openai, as mocked."""
openai_mock.return_value = MagicMock()
openai_mock.return_value.chat.return_value.message.content = "vector_index"
query = "Please describe the llama_index framework in 280 characters or less."
result = nd_selector._select(choices, QueryBundle(query_str=query))
assert result.session_id == session_id
assert str(result.llm) == "openai/gpt-4o"
assert result.selections[0].index == 0
assert openai_mock.is_called
@pytest.mark.asyncio
@patch("llama_index.llms.openai.OpenAI")
async def test_aselect(self, openai_mock, nd_selector, choices, session_id):
"""_aselect should call openai, as mocked."""
openai_mock.return_value = MagicMock()
openai_mock.return_value.chat.return_value.message.content = "vector_index"
query = "How can I cook a vegan variant of deviled eggs?"
result = await nd_selector._aselect(choices, QueryBundle(query_str=query))
assert result.session_id == session_id
assert str(result.llm) == "openai/gpt-4o"
assert result.selections[0].index == 1
assert openai_mock.is_called
|
import os
import pytest
from typing import List
from unittest.mock import MagicMock, patch, AsyncMock
import uuid
from llama_index.core.base.base_selector import (
SelectorResult,
SingleSelection,
)
from llama_index.core.schema import QueryBundle
from llama_index.core.tools import ToolMetadata
from llama_index.selectors.notdiamond.base import NotDiamondSelector, LLMSingleSelector
from notdiamond import LLMConfig
@pytest.fixture()
def session_id() -> str:
return str(uuid.uuid4())
@pytest.fixture()
def choices() -> List[ToolMetadata]:
return [
ToolMetadata(
name="vector_index", description="Great for asking questions about recipes."
),
ToolMetadata(name="list_index", description="Great for summarizing recipes."),
]
@pytest.fixture()
def nd_selector(session_id):
from notdiamond import NotDiamond
os.environ["OPENAI_API_KEY"] = "test"
os.environ["ANTHROPIC_API_KEY"] = "test"
llm_configs = [
LLMConfig(provider="openai", model="gpt-4o"),
]
# mocking out model_select calls on client
_client = MagicMock(stub=NotDiamond, api_key="test", llm_configs=llm_configs)
_client.model_select.return_value = (session_id, llm_configs[0])
async def aselect(*args, **kwargs):
return (session_id, llm_configs[0])
_client.amodel_select = aselect
selector = NotDiamondSelector(client=_client)
# monkeypatch the _select and _aselect methods on parent class of NDSelector
LLMSingleSelector._select = MagicMock(
return_value=SelectorResult(
selections=[SingleSelection(index=0, reason="test")]
)
)
LLMSingleSelector._aselect = AsyncMock(
return_value=SelectorResult(
selections=[SingleSelection(index=1, reason="test")]
)
)
return selector
class TestNotDiamondSelector:
@patch("llama_index.llms.openai.OpenAI")
def test_select(self, openai_mock, nd_selector, choices, session_id):
"""_select should call openai, as mocked."""
openai_mock.return_value = MagicMock()
openai_mock.return_value.chat.return_value.message.content = "vector_index"
query = "Please describe the llama_index framework in 280 characters or less."
result = nd_selector._select(choices, QueryBundle(query_str=query))
assert result.session_id == session_id
assert str(result.llm) == "openai/gpt-4o"
assert result.selections[0].index == 0
assert openai_mock.is_called
@pytest.mark.asyncio()
@patch("llama_index.llms.openai.OpenAI")
async def test_aselect(self, openai_mock, nd_selector, choices, session_id):
"""_aselect should call openai, as mocked."""
openai_mock.return_value = MagicMock()
openai_mock.return_value.chat.return_value.message.content = "vector_index"
query = "How can I cook a vegan variant of deviled eggs?"
result = await nd_selector._aselect(choices, QueryBundle(query_str=query))
assert result.session_id == session_id
assert str(result.llm) == "openai/gpt-4o"
assert result.selections[0].index == 1
assert openai_mock.is_called
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "4.0.0"
from .arrow_dataset import Column, Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableColumn, IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "4.0.0.dev0"
from .arrow_dataset import Column, Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableColumn, IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
import os
from typing import Optional
import fsspec
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import (
DEFAULT_BATCH_SIZE,
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
DEFAULT_PERSIST_PATH,
)
from llama_index.core.storage.kvstore.simple_kvstore import SimpleKVStore
from llama_index.core.storage.kvstore.types import MutableMappingKVStore, BaseInMemoryKVStore
from llama_index.core.utils import concat_dirs
class SimpleDocumentStore(KVDocumentStore):
"""
Simple Document (Node) store.
An in-memory store for Document and Node objects.
Args:
simple_kvstore (SimpleKVStore): simple key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
simple_kvstore: Optional[SimpleKVStore] = None,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a SimpleDocumentStore."""
simple_kvstore = simple_kvstore or SimpleKVStore()
super().__init__(simple_kvstore, namespace=namespace, batch_size=batch_size)
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
namespace: Optional[str] = None,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleDocumentStore":
"""
Create a SimpleDocumentStore from a persist directory.
Args:
persist_dir (str): directory to persist the store
namespace (Optional[str]): namespace for the docstore
fs (Optional[fsspec.AbstractFileSystem]): filesystem to use
"""
if fs is not None:
persist_path = concat_dirs(persist_dir, DEFAULT_PERSIST_FNAME)
else:
persist_path = os.path.join(persist_dir, DEFAULT_PERSIST_FNAME)
return cls.from_persist_path(persist_path, namespace=namespace, fs=fs)
@classmethod
def from_persist_path(
cls,
persist_path: str,
namespace: Optional[str] = None,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleDocumentStore":
"""
Create a SimpleDocumentStore from a persist path.
Args:
persist_path (str): Path to persist the store
namespace (Optional[str]): namespace for the docstore
            fs (Optional[fsspec.AbstractFileSystem]): filesystem to use
"""
simple_kvstore = SimpleKVStore.from_persist_path(persist_path, fs=fs)
return cls(simple_kvstore, namespace)
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the store."""
if isinstance(self._kvstore, (MutableMappingKVStore, BaseInMemoryKVStore)):
self._kvstore.persist(persist_path, fs=fs)
@classmethod
def from_dict(
cls, save_dict: dict, namespace: Optional[str] = None
) -> "SimpleDocumentStore":
simple_kvstore = SimpleKVStore.from_dict(save_dict)
return cls(simple_kvstore, namespace)
def to_dict(self) -> dict:
assert isinstance(self._kvstore, SimpleKVStore)
return self._kvstore.to_dict()
# alias for backwards compatibility
DocumentStore = SimpleDocumentStore
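# --- Usage sketch (illustrative; the path below is a placeholder) ---------------
# A minimal persistence round-trip via the KVDocumentStore API
# (`add_documents` / `get_node`) inherited by SimpleDocumentStore.
if __name__ == "__main__":
    from llama_index.core.schema import TextNode
    store = SimpleDocumentStore()
    store.add_documents([TextNode(id_="node-1", text="hello docstore")])
    store.persist("./docstore.json")
    restored = SimpleDocumentStore.from_persist_path("./docstore.json")
    print(restored.get_node("node-1").get_content())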
|
import os
from typing import Optional
import fsspec
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import (
DEFAULT_BATCH_SIZE,
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
DEFAULT_PERSIST_PATH,
)
from llama_index.core.storage.kvstore.simple_kvstore import SimpleKVStore
from llama_index.core.storage.kvstore.types import BaseInMemoryKVStore
from llama_index.core.utils import concat_dirs
class SimpleDocumentStore(KVDocumentStore):
"""
Simple Document (Node) store.
An in-memory store for Document and Node objects.
Args:
simple_kvstore (SimpleKVStore): simple key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
simple_kvstore: Optional[SimpleKVStore] = None,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a SimpleDocumentStore."""
simple_kvstore = simple_kvstore or SimpleKVStore()
super().__init__(simple_kvstore, namespace=namespace, batch_size=batch_size)
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
namespace: Optional[str] = None,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleDocumentStore":
"""
Create a SimpleDocumentStore from a persist directory.
Args:
persist_dir (str): directory to persist the store
namespace (Optional[str]): namespace for the docstore
fs (Optional[fsspec.AbstractFileSystem]): filesystem to use
"""
if fs is not None:
persist_path = concat_dirs(persist_dir, DEFAULT_PERSIST_FNAME)
else:
persist_path = os.path.join(persist_dir, DEFAULT_PERSIST_FNAME)
return cls.from_persist_path(persist_path, namespace=namespace, fs=fs)
@classmethod
def from_persist_path(
cls,
persist_path: str,
namespace: Optional[str] = None,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleDocumentStore":
"""
Create a SimpleDocumentStore from a persist path.
Args:
persist_path (str): Path to persist the store
namespace (Optional[str]): namespace for the docstore
fs (Optional[fsspec.AbstractFileSystem]): filesystem to use
"""
simple_kvstore = SimpleKVStore.from_persist_path(persist_path, fs=fs)
return cls(simple_kvstore, namespace)
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the store."""
if isinstance(self._kvstore, BaseInMemoryKVStore):
self._kvstore.persist(persist_path, fs=fs)
@classmethod
def from_dict(
cls, save_dict: dict, namespace: Optional[str] = None
) -> "SimpleDocumentStore":
simple_kvstore = SimpleKVStore.from_dict(save_dict)
return cls(simple_kvstore, namespace)
def to_dict(self) -> dict:
assert isinstance(self._kvstore, SimpleKVStore)
return self._kvstore.to_dict()
# alias for backwards compatibility
DocumentStore = SimpleDocumentStore
|
# Copyright (c) OpenMMLab. All rights reserved.
import json
import os
import tempfile
from typing import List, Optional
from mmengine.evaluator import BaseMetric
from mmengine.utils import track_iter_progress
from pycocotools.coco import COCO
from mmdet.registry import METRICS
try:
from pycocoevalcap.eval import COCOEvalCap
except ImportError:
COCOEvalCap = None
@METRICS.register_module()
class COCOCaptionMetric(BaseMetric):
"""Coco Caption evaluation wrapper.
Save the generated captions and transform into coco format.
Calling COCO API for caption metrics.
Args:
        ann_file (str): the path to the COCO-format caption ground-truth
            json file, loaded for evaluation.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
            will be used instead. Defaults to None.
"""
def __init__(self,
ann_file: str,
collect_device: str = 'cpu',
prefix: Optional[str] = None):
if COCOEvalCap is None:
raise RuntimeError(
'COCOEvalCap is not installed, please install it by: '
'pip install pycocoevalcap')
super().__init__(collect_device=collect_device, prefix=prefix)
self.ann_file = ann_file
def process(self, data_batch, data_samples):
"""Process one batch of data samples.
The processed results should be stored in ``self.results``, which will
        be used to compute the metrics when all batches have been processed.
Args:
data_batch: A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
result = dict()
result['caption'] = data_sample['pred_caption']
result['image_id'] = int(data_sample['img_id'])
# Save the result to `self.results`.
self.results.append(result)
def compute_metrics(self, results: List):
"""Compute the metrics from processed results.
Args:
            results (List): The processed results of each batch.
Returns:
Dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
# NOTICE: don't access `self.results` from the method.
with tempfile.TemporaryDirectory() as temp_dir:
eval_result_file = save_result(
result=results,
result_dir=temp_dir,
filename='caption_pred',
remove_duplicate='image_id',
)
coco_val = coco_caption_eval(eval_result_file, self.ann_file)
return coco_val
def save_result(result, result_dir, filename, remove_duplicate=''):
"""Saving predictions as json file for evaluation."""
# combine results from all processes
if remove_duplicate:
result_new = []
id_list = []
for res in track_iter_progress(result):
if res[remove_duplicate] not in id_list:
id_list.append(res[remove_duplicate])
result_new.append(res)
result = result_new
final_result_file_url = os.path.join(result_dir, '%s.json' % filename)
print(f'result file saved to {final_result_file_url}')
    with open(final_result_file_url, 'w') as f:
        json.dump(result, f)
return final_result_file_url
def coco_caption_eval(results_file, ann_file):
"""Evaluation between gt json and prediction json files."""
# create coco object and coco_result object
coco = COCO(ann_file)
coco_result = coco.loadRes(results_file)
# create coco_eval object by taking coco and coco_result
coco_eval = COCOEvalCap(coco, coco_result)
# make sure the image ids are the same
coco_eval.params['image_id'] = coco_result.getImgIds()
    # This will take some time at the first run
coco_eval.evaluate()
# print output evaluation scores
for metric, score in coco_eval.eval.items():
print(f'{metric}: {score:.3f}')
return coco_eval.eval
|
# Copyright (c) OpenMMLab. All rights reserved.
import json
import os
import tempfile
from typing import List, Optional
from mmengine.evaluator import BaseMetric
from mmengine.utils import track_iter_progress
from pycocotools.coco import COCO
from mmdet.registry import METRICS
try:
from pycocoevalcap.eval import COCOEvalCap
except ImportError:
COCOEvalCap = None
@METRICS.register_module()
class COCOCaptionMetric(BaseMetric):
"""Coco Caption evaluation wrapper.
Save the generated captions, transform them into COCO format, and call the
COCO API to compute caption metrics.
Args:
ann_file (str): the path for the COCO format caption ground truth
json file, loaded for evaluation.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
def __init__(self,
ann_file: str,
collect_device: str = 'cpu',
prefix: Optional[str] = None):
if COCOEvalCap is None:
raise RuntimeError(
'COCOEvalCap is not installed, please install it by: '
'pip install pycocoevalcap')
super().__init__(collect_device=collect_device, prefix=prefix)
self.ann_file = ann_file
def process(self, data_batch, data_samples):
"""Process one batch of data samples.
The processed results should be stored in ``self.results``, which will
be used to compute the metrics when all batches have been processed.
Args:
data_batch: A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
result = dict()
result['caption'] = data_sample['pred_caption']
result['image_id'] = data_sample['img_id']
# Save the result to `self.results`.
self.results.append(result)
def compute_metrics(self, results: List):
"""Compute the metrics from processed results.
Args:
results (List): The processed results of each batch.
Returns:
Dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
# NOTICE: don't access `self.results` from the method.
with tempfile.TemporaryDirectory() as temp_dir:
eval_result_file = save_result(
result=results,
result_dir=temp_dir,
filename='m4-caption_pred',
remove_duplicate='image_id',
)
coco_val = coco_caption_eval(eval_result_file, self.ann_file)
return coco_val
def save_result(result, result_dir, filename, remove_duplicate=''):
"""Saving predictions as json file for evaluation."""
# combine results from all processes
if remove_duplicate:
result_new = []
id_list = []
for res in track_iter_progress(result):
if res[remove_duplicate] not in id_list:
id_list.append(res[remove_duplicate])
result_new.append(res)
result = result_new
final_result_file_url = os.path.join(result_dir, '%s.json' % filename)
print(f'result file saved to {final_result_file_url}')
json.dump(result, open(final_result_file_url, 'w'))
return final_result_file_url
def coco_caption_eval(results_file, ann_file):
"""Evaluation between gt json and prediction json files."""
# create coco object and coco_result object
coco = COCO(ann_file)
coco_result = coco.loadRes(results_file)
# create coco_eval object by taking coco and coco_result
coco_eval = COCOEvalCap(coco, coco_result)
# make sure the image ids are the same
coco_eval.params['image_id'] = coco_result.getImgIds()
# This may take some time on the first run
coco_eval.evaluate()
# print output evaluation scores
for metric, score in coco_eval.eval.items():
print(f'{metric}: {score:.3f}')
return coco_eval.eval
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
# TODO: Currently not supported
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(CascadeRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'setup_multi_processes', 'get_caller_name', 'log_img_scale'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# build the runner from config
runner = Runner.from_cfg(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
from keras.src.api_export import keras_export
@keras_export(["keras.Initializer", "keras.initializers.Initializer"])
class Initializer:
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__()` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
Optionally, you can also implement the method `get_config()` and the class
method `from_config` in order to support serialization, just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
class ExampleRandomNormal(Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return keras.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype
)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
Note that we don't have to implement `from_config()` in the example above
since the constructor arguments of the class and the keys in the config returned
by `get_config()` are the same. In this case, the default `from_config()`
works fine.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
"""
raise NotImplementedError(
"Initializer subclasses must implement the `__call__()` method."
)
def get_config(self):
"""Returns the initializer's configuration as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
An `Initializer` instance.
"""
return cls(**config)
def clone(self):
return self.__class__.from_config(self.get_config())
|
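A minimal usage sketch (assuming Keras 3 with `keras.ops.full` available) showing how a custom subclass of the base class above round-trips through `get_config()` / `from_config()` and `clone()`; the `ConstantInit` class is invented for illustration only.

```python
import keras


class ConstantInit(keras.initializers.Initializer):
    """Toy initializer used only for this sketch."""

    def __init__(self, value=0.5):
        self.value = value

    def __call__(self, shape, dtype=None):
        # Fill a tensor of the requested shape with a constant value.
        return keras.ops.full(shape, self.value, dtype=dtype)

    def get_config(self):  # enables serialization
        return {"value": self.value}


init = ConstantInit(value=0.1)
weights = init((2, 3))                                   # shape (2, 3), filled with 0.1
restored = ConstantInit.from_config(init.get_config())   # rebuilt from its config
copied = init.clone()                                    # fresh instance, same config
```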
from keras.src.api_export import keras_export
@keras_export(["keras.Initializer", "keras.initializers.Initializer"])
class Initializer:
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__()` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
Optionally, you can also implement the method `get_config()` and the class
method `from_config` in order to support serialization -- just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
class ExampleRandomNormal(Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return keras.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype
)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
Note that we don't have to implement `from_config()` in the example above
since the constructor arguments of the class and the keys in the config returned
by `get_config()` are the same. In this case, the default `from_config()`
works fine.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
"""
raise NotImplementedError(
"Initializer subclasses must implement the `__call__()` method."
)
def get_config(self):
"""Returns the initializer's configuration as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
An `Initializer` instance.
"""
return cls(**config)
def clone(self):
return self.__class__.from_config(self.get_config())
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scripts/download_full.sh',
cwd=Path(__file__).parents[1],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
yield
shutil.rmtree(Path(__file__).parents[1] / '.cache')
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scripts/download_full.sh', cwd=Path(__file__).parents[1], check=True
)
yield
shutil.rmtree('.cache')
@pytest.fixture(scope='session')
def build_docker_image() -> str:
img_name = Path(__file__).parents[1].stem.lower()
subprocess.run(['docker', 'build', '-t', img_name, '.'], check=True)
return img_name
@pytest.fixture()
def data_generator():
def _generator():
data_file_path = Path(__file__).parent / 'texts' / 'test_data.txt'
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
chunks = [Document(text='hello world') for _ in range(10)]
return DocumentArray([Document(chunks=chunks)])
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
root = Document()
chunks = [Document() for _ in range(10)]
chunks_2 = [[Document(text='hello world') for _ in range(10)] for _ in range(10)]
root.chunks.extend(chunks)
for i, chunk in enumerate(root.chunks):
chunk.chunks.extend(chunks_2[i])
return DocumentArray([root])
|
from backend.app import run_processes
from backend.executor import ExecutionManager
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(ExecutionManager())
if __name__ == "__main__":
main()
|
from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionManager
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(ExecutionManager())
if __name__ == "__main__":
main()
|
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_bytes(protocol, compress, show_progress):
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_base64(protocol, compress, show_progress):
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_bytes(protocol, compress, show_progress):
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocArray[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_base64(protocol, compress, show_progress):
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocArray[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api._tf_keras.keras.preprocessing import image
from keras.api._tf_keras.keras.preprocessing import sequence
from keras.api._tf_keras.keras.preprocessing import text
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras.preprocessing import image
from keras._tf_keras.keras.preprocessing import sequence
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras.preprocessing import image
from keras._tf_keras.keras.preprocessing import sequence
from keras._tf_keras.keras.preprocessing import text
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox')
test_evaluator = val_evaluator
|
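A hedged sketch of how a model config might reuse the dataset settings above through MMEngine's `_base_` inheritance; both relative paths are assumptions about the repository layout, not taken from the source.

```python
_base_ = [
    '../_base_/datasets/cityscapes_detection.py',  # assumed location of the file above
    '../_base_/models/faster-rcnn_r50_fpn.py',     # assumed base model config
]
# Individual fields can still be overridden per experiment, e.g. the batch size:
train_dataloader = dict(batch_size=2)
```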
# dataset settings
dataset_type = 'CityscapesDataset'
# TODO remove it after cityscape metric
# data_root = '/mnt/lustre/luochunhua.vendor/openmmlab2.0/data/cityscapes/'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox')
test_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox')
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.2.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
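A quick sketch of the values `parse_version_info` produces, assuming the function defined above is in scope (its module path is not shown here).

```python
assert parse_version_info('0.2.0') == (0, 2, 0)
assert parse_version_info('0.2.0rc1') == (0, 2, 0, 'rc1')  # rc suffix kept as a string
```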
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.1.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from typing_extensions import TYPE_CHECKING
from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray, AudioTensor
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray, VideoTensor
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageTensor',
'AudioTensor',
'VideoTensor',
'ImageNdArray',
'ImageBytes',
'VideoBytes',
'AudioBytes',
]
_torch_tensors = [
'TorchTensor',
'TorchEmbedding',
'ImageTorchTensor',
'AudioTorchTensor',
'VideoTorchTensor',
]
_tf_tensors = [
'TensorFlowTensor',
'TensorFlowEmbedding',
'ImageTensorFlowTensor',
'AudioTensorFlowTensor',
'VideoTensorFlowTensor',
]
__all_test__ = __all__ + _torch_tensors
def __getattr__(name: str):
if name in _torch_tensors:
import_library('torch', raise_error=True)
elif name in _tf_tensors:
import_library('tensorflow', raise_error=True)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
import docarray.typing.tensor
tensor_cls = getattr(docarray.typing.tensor, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
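A small sketch of the lazy-import path above: accessing a torch-backed type triggers `import_library('torch', raise_error=True)` and resolves the class from `docarray.typing.tensor`. This assumes torch is installed; otherwise an ImportError is raised.

```python
# NdArray is imported eagerly; TorchTensor is resolved via the module __getattr__.
from docarray.typing import NdArray, TorchTensor

print(NdArray, TorchTensor)
```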
from typing import (
Union,
TYPE_CHECKING,
TypeVar,
Sequence,
Optional,
List,
Dict,
Generator,
Iterable,
Tuple,
ForwardRef,
)
if TYPE_CHECKING: # pragma: no cover
import scipy.sparse
import tensorflow
import torch
import numpy as np
from PIL.Image import Image as PILImage
from docarray import Document
ArrayType = TypeVar(
'ArrayType',
np.ndarray,
scipy.sparse.spmatrix,
tensorflow.SparseTensor,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
DocumentContentType = Union[bytes, str, ArrayType]
ProtoValueType = Optional[Union[str, bool, float]]
StructValueType = Union[
ProtoValueType, List[ProtoValueType], Dict[str, ProtoValueType]
]
DocumentArraySourceType = Union[
Sequence[Document], Document, Generator[Document], Iterable[Document]
]
T = TypeVar('T')
AnyDNN = TypeVar(
'AnyDNN'
) #: The type of any implementation of a Deep Neural Network object
DocumentArraySingletonIndexType = Union[int, str]
DocumentArrayMultipleIndexType = Union[
slice, Sequence[int], Sequence[str], Sequence[bool], Ellipsis
]
DocumentArraySingleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType], str
]
DocumentArrayMultipleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType],
Sequence[str],
]
DocumentArrayIndexType = Union[
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArraySingleAttributeType,
DocumentArrayMultipleAttributeType,
]
Image = TypeVar(
'Image',
str,
ForwardRef('np.ndarray'),
ForwardRef('PILImage'),
)
Text = TypeVar('Text', bound=str)
URI = TypeVar('URI', bound=str)
Audio = TypeVar('Audio', str, ForwardRef('np.ndarray'))
Video = TypeVar('Video', str, ForwardRef('np.ndarray'))
Mesh = TypeVar('Mesh', str, ForwardRef('np.ndarray'))
Tabular = TypeVar('Tabular', bound=str)
Blob = TypeVar('Blob', str, bytes)
JSON = TypeVar('JSON', str, dict)
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py']
# Use ClassAwareSampler
train_dataloader = dict(
sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1))
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py']
# Use ClassAwareSampler
data = dict(
train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))
|
#!/usr/bin/env python3
# Tool to quickly rebuild one or two files with debug info
# Mimics the following behavior:
# - touch file
# - ninja -j1 -v -n torch_python | sed -e 's/-O[23]/-g/g' -e 's#\[[0-9]\+\/[0-9]\+\] \+##' |sh
# - Copy libs from build/lib to torch/lib folder
from __future__ import annotations
import subprocess
import sys
from pathlib import Path
from typing import Any
PYTORCH_ROOTDIR = Path(__file__).resolve().parent.parent
TORCH_DIR = PYTORCH_ROOTDIR / "torch"
TORCH_LIB_DIR = TORCH_DIR / "lib"
BUILD_DIR = PYTORCH_ROOTDIR / "build"
BUILD_LIB_DIR = BUILD_DIR / "lib"
def check_output(args: list[str], cwd: str | None = None) -> str:
return subprocess.check_output(args, cwd=cwd).decode("utf-8")
def parse_args() -> Any:
from argparse import ArgumentParser
parser = ArgumentParser(description="Incremental build PyTorch with debinfo")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("files", nargs="*")
return parser.parse_args()
def get_lib_extension() -> str:
if sys.platform == "linux":
return "so"
if sys.platform == "darwin":
return "dylib"
raise RuntimeError(f"Unsupported platform {sys.platform}")
def create_symlinks() -> None:
"""Creates symlinks from build/lib to torch/lib"""
if not TORCH_LIB_DIR.exists():
raise RuntimeError(f"Can't create symlinks as {TORCH_LIB_DIR} does not exist")
if not BUILD_LIB_DIR.exists():
raise RuntimeError(f"Can't create symlinks as {BUILD_LIB_DIR} does not exist")
for torch_lib in TORCH_LIB_DIR.glob(f"*.{get_lib_extension()}"):
if torch_lib.is_symlink():
continue
build_lib = BUILD_LIB_DIR / torch_lib.name
if not build_lib.exists():
raise RuntimeError(f"Can't find {build_lib} corresponding to {torch_lib}")
torch_lib.unlink()
torch_lib.symlink_to(build_lib)
def has_build_ninja() -> bool:
return (BUILD_DIR / "build.ninja").exists()
def is_devel_setup() -> bool:
output = check_output([sys.executable, "-c", "import torch;print(torch.__file__)"])
return output.strip() == str(TORCH_DIR / "__init__.py")
def create_build_plan() -> list[tuple[str, str]]:
output = check_output(
["ninja", "-j1", "-v", "-n", "torch_python"], cwd=str(BUILD_DIR)
)
rc = []
for line in output.split("\n"):
if not line.startswith("["):
continue
line = line.split("]", 1)[1].strip()
if line.startswith(": &&") and line.endswith("&& :"):
line = line[4:-4]
line = line.replace("-O2", "-g").replace("-O3", "-g")
# Build Metal shaders with debug information
if "xcrun metal " in line and "-frecord-sources" not in line:
line += " -frecord-sources -gline-tables-only"
try:
name = line.split("-o ", 1)[1].split(" ")[0]
rc.append((name, line))
except IndexError:
print(f"Skipping {line} as it does not specify output file")
return rc
def main() -> None:
if sys.platform == "win32":
print("Not supported on Windows yet")
sys.exit(-95)
if not is_devel_setup():
print(
"Not a devel setup of PyTorch, "
"please run `python -m pip install --no-build-isolation -v -e .` first"
)
sys.exit(-1)
if not has_build_ninja():
print("Only ninja build system is supported at the moment")
sys.exit(-1)
args = parse_args()
for file in args.files:
if file is None:
continue
Path(file).touch()
build_plan = create_build_plan()
if len(build_plan) == 0:
return print("Nothing to do")
if len(build_plan) > 100:
print("More than 100 items needs to be rebuild, run `ninja torch_python` first")
sys.exit(-1)
for idx, (name, cmd) in enumerate(build_plan):
print(f"[{idx + 1} / {len(build_plan)}] Building {name}")
if args.verbose:
print(cmd)
subprocess.check_call(["sh", "-c", cmd], cwd=BUILD_DIR)
create_symlinks()
if __name__ == "__main__":
main()
|
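A hypothetical invocation of the tool above; the script path `tools/build_with_debinfo.py` and the touched file are assumptions for illustration, since the actual file name is not shown in this dump.

```python
import subprocess
import sys

# Touch and rebuild a single translation unit with -g instead of -O2/-O3,
# then refresh the torch/lib symlinks (see create_symlinks above).
subprocess.check_call(
    [sys.executable, "tools/build_with_debinfo.py", "--verbose", "torch/csrc/Module.cpp"]
)
```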
#!/usr/bin/env python3
# Tool to quickly rebuild one or two files with debug info
# Mimics the following behavior:
# - touch file
# - ninja -j1 -v -n torch_python | sed -e 's/-O[23]/-g/g' -e 's#\[[0-9]\+\/[0-9]\+\] \+##' |sh
# - Copy libs from build/lib to torch/lib folder
from __future__ import annotations
import subprocess
import sys
from pathlib import Path
from typing import Any
PYTORCH_ROOTDIR = Path(__file__).resolve().parent.parent
TORCH_DIR = PYTORCH_ROOTDIR / "torch"
TORCH_LIB_DIR = TORCH_DIR / "lib"
BUILD_DIR = PYTORCH_ROOTDIR / "build"
BUILD_LIB_DIR = BUILD_DIR / "lib"
def check_output(args: list[str], cwd: str | None = None) -> str:
return subprocess.check_output(args, cwd=cwd).decode("utf-8")
def parse_args() -> Any:
from argparse import ArgumentParser
parser = ArgumentParser(description="Incremental build PyTorch with debinfo")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("files", nargs="*")
return parser.parse_args()
def get_lib_extension() -> str:
if sys.platform == "linux":
return "so"
if sys.platform == "darwin":
return "dylib"
raise RuntimeError(f"Unsupported platform {sys.platform}")
def create_symlinks() -> None:
"""Creates symlinks from build/lib to torch/lib"""
if not TORCH_LIB_DIR.exists():
raise RuntimeError(f"Can't create symlinks as {TORCH_LIB_DIR} does not exist")
if not BUILD_LIB_DIR.exists():
raise RuntimeError(f"Can't create symlinks as {BUILD_LIB_DIR} does not exist")
for torch_lib in TORCH_LIB_DIR.glob(f"*.{get_lib_extension()}"):
if torch_lib.is_symlink():
continue
build_lib = BUILD_LIB_DIR / torch_lib.name
if not build_lib.exists():
raise RuntimeError(f"Can't find {build_lib} corresponding to {torch_lib}")
torch_lib.unlink()
torch_lib.symlink_to(build_lib)
def has_build_ninja() -> bool:
return (BUILD_DIR / "build.ninja").exists()
def is_devel_setup() -> bool:
output = check_output([sys.executable, "-c", "import torch;print(torch.__file__)"])
return output.strip() == str(TORCH_DIR / "__init__.py")
def create_build_plan() -> list[tuple[str, str]]:
output = check_output(
["ninja", "-j1", "-v", "-n", "torch_python"], cwd=str(BUILD_DIR)
)
rc = []
for line in output.split("\n"):
if not line.startswith("["):
continue
line = line.split("]", 1)[1].strip()
if line.startswith(": &&") and line.endswith("&& :"):
line = line[4:-4]
line = line.replace("-O2", "-g").replace("-O3", "-g")
# Build Metal shaders with debug information
if "xcrun metal " in line and "-frecord-sources" not in line:
line += " -frecord-sources -gline-tables-only"
try:
name = line.split("-o ", 1)[1].split(" ")[0]
rc.append((name, line))
except IndexError:
print(f"Skipping {line} as it does not specify output file")
return rc
def main() -> None:
if sys.platform == "win32":
print("Not supported on Windows yet")
sys.exit(-95)
if not is_devel_setup():
print(
"Not a devel setup of PyTorch, please run `python3 setup.py develop --user` first"
)
sys.exit(-1)
if not has_build_ninja():
print("Only ninja build system is supported at the moment")
sys.exit(-1)
args = parse_args()
for file in args.files:
if file is None:
continue
Path(file).touch()
build_plan = create_build_plan()
if len(build_plan) == 0:
return print("Nothing to do")
if len(build_plan) > 100:
print("More than 100 items needs to be rebuild, run `ninja torch_python` first")
sys.exit(-1)
for idx, (name, cmd) in enumerate(build_plan):
print(f"[{idx + 1} / {len(build_plan)}] Building {name}")
if args.verbose:
print(cmd)
subprocess.check_call(["sh", "-c", cmd], cwd=BUILD_DIR)
create_symlinks()
if __name__ == "__main__":
main()
|
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .RerankingEvaluator import RerankingEvaluator
from .SentenceEvaluator import SentenceEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .SimilarityFunction import SimilarityFunction
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
]
|
from .SentenceEvaluator import SentenceEvaluator
from .SimilarityFunction import SimilarityFunction
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
from .RerankingEvaluator import RerankingEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
]
|
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values,
"openai_api_key",
"OPENAI_API_KEY",
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
msg = (
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
raise ImportError(msg)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
condition = results["flagged"] if self.openai_pre_1_0 else results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
|
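A minimal usage sketch, assuming `OPENAI_API_KEY` is set and the `openai` package is installed; it simply runs the chain above on one input string.

```python
from langchain.chains import OpenAIModerationChain

moderation = OpenAIModerationChain(error=False)  # return a message instead of raising
result = moderation.invoke({"input": "some text to check"})
print(result["output"])  # original text, or the content-policy violation message
```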
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
msg = (
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
raise ImportError(msg)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
condition = results["flagged"] if self.openai_pre_1_0 else results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
from jina.helper import random_port
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = random_port()
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(replicas=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
def test_grpc_custom_options():
f = Flow(grpc_server_options={'grpc.max_send_message_length': -1})
with f:
pass
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(parallel=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
def test_grpc_custom_options():
f = Flow(grpc_server_options={'grpc.max_send_message_length': -1})
with f:
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
from .ytvis import YTVIS
from .ytviseval import YTVISeval
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes',
'evaluateImgLists', 'YTVIS', 'YTVISeval'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes',
'evaluateImgLists'
]
|
import csv
import pathlib
from typing import Any, Callable, Optional, Tuple
import torch
from PIL import Image
from .utils import check_integrity, verify_str_arg
from .vision import VisionDataset
class FER2013(VisionDataset):
"""`FER2013
<https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``root/fer2013`` exists.
split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
"""
_RESOURCES = {
"train": ("train.csv", "3f0dfb3d3fd99c811a1299cb947e3131"),
"test": ("test.csv", "b02c2298636a634e8c2faabbf3ea9a23"),
}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
self._split = verify_str_arg(split, "split", self._RESOURCES.keys())
super().__init__(root, transform=transform, target_transform=target_transform)
base_folder = pathlib.Path(self.root) / "fer2013"
file_name, md5 = self._RESOURCES[self._split]
data_file = base_folder / file_name
if not check_integrity(str(data_file), md5=md5):
raise RuntimeError(
f"{file_name} not found in {base_folder} or corrupted. "
f"You can download it from "
f"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
)
with open(data_file, "r", newline="") as file:
self._samples = [
(
torch.tensor([int(idx) for idx in row["pixels"].split()], dtype=torch.uint8).reshape(48, 48),
int(row["emotion"]) if "emotion" in row else None,
)
for row in csv.DictReader(file)
]
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_tensor, target = self._samples[idx]
image = Image.fromarray(image_tensor.numpy())
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
target = self.target_transform(target)
return image, target
def extra_repr(self) -> str:
return f"split={self._split}"
|
import csv
import pathlib
from typing import Any, Callable, Optional, Tuple
import torch
from PIL import Image
from .utils import check_integrity, verify_str_arg
from .vision import VisionDataset
class FER2013(VisionDataset):
"""`FER2013
<https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``root/fer2013`` exists.
split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
"""
_RESOURCES = {
"train": ("train.csv", "3f0dfb3d3fd99c811a1299cb947e3131"),
"test": ("test.csv", "b02c2298636a634e8c2faabbf3ea9a23"),
}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
self._split = verify_str_arg(split, "split", self._RESOURCES.keys())
super().__init__(root, transform=transform, target_transform=target_transform)
base_folder = pathlib.Path(self.root) / "fer2013"
file_name, md5 = self._RESOURCES[self._split]
data_file = base_folder / file_name
if not check_integrity(str(data_file), md5=md5):
raise RuntimeError(
f"{file_name} not found in {base_folder} or corrupted. "
f"You can download it from "
f"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
)
with open(data_file, "r", newline="") as file:
self._samples = [
(
torch.tensor([int(idx) for idx in row["pixels"].split()], dtype=torch.uint8).reshape(48, 48),
int(row["emotion"]) if "emotion" in row else None,
)
for row in csv.DictReader(file)
]
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_tensor, target = self._samples[idx]
image = Image.fromarray(image_tensor.numpy())
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
target = self.target_transform(target)
return image, target
def extra_repr(self) -> str:
return f"split={self._split}"
|
import numpy as np
from docarray import BaseDoc
from docarray.array.doc_vec.doc_vec import DocVec
from docarray.typing import AnyTensor, NdArray
def test_da_init():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
assert (da._storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
assert da._storage.any_columns['name']._data == ['hello' for _ in range(4)]
def test_da_iter():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=i * np.zeros((10, 10)), name=f'hello{i}') for i in range(4)]
da = DocVec[MyDoc](docs, tensor_type=NdArray)
for i, doc in enumerate(da):
assert isinstance(doc, MyDoc)
assert (doc.tensor == i * np.zeros((10, 10))).all()
assert doc.name == f'hello{i}'
|
import numpy as np
from docarray import BaseDoc
from docarray.array.stacked.array_stacked import DocArrayStacked
from docarray.typing import AnyTensor, NdArray
def test_da_init():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)]
da = DocArrayStacked[MyDoc](docs, tensor_type=NdArray)
assert (da._storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
assert da._storage.any_columns['name']._data == ['hello' for _ in range(4)]
def test_da_iter():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=i * np.zeros((10, 10)), name=f'hello{i}') for i in range(4)]
da = DocArrayStacked[MyDoc](docs, tensor_type=NdArray)
for i, doc in enumerate(da):
assert isinstance(doc, MyDoc)
assert (doc.tensor == i * np.zeros((10, 10))).all()
assert doc.name == f'hello{i}'
|
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export("keras.StatelessScope")
class StatelessScope:
"""Scope to prevent any update to Keras Variables.
The values of variables to be used inside the scope
should be passed via the `state_mapping` argument, a
list of tuples `(k, v)` where `k` is a `Variable`
and `v` is the intended value for this variable
(a backend tensor).
Updated values can be collected on scope exit via
`value = scope.get_current_value(variable)`. No updates
will be applied in-place to any variables for the duration
of the scope.
Example:
```python
state_mapping = [(k, ops.ones(k.shape, k.dtype)) for k in model.weights]
with keras.StatelessScope(state_mapping) as scope:
outputs = model.some_function(inputs)
# All model variables remain unchanged. Their new values can be
# collected via:
for k in model.weights:
new_value = scope.get_current_value(k)
print(f"New value for {k}: {new_value})
```
"""
def __init__(
self,
state_mapping=None,
collect_losses=False,
initialize_variables=True,
):
from keras.src import backend
from keras.src.backend.common.variables import Variable
self.collect_losses = collect_losses
self.initialize_variables = initialize_variables
self.losses = []
self.state_mapping = {}
state_mapping = state_mapping or {}
for k, v in state_mapping:
if not isinstance(k, Variable):
raise ValueError(
"Invalid reference variable in StatelessScope: "
"all keys in argument `mapping` must be Variable "
f"instances. Received instead: {k}"
)
if isinstance(v, Variable):
v = backend.cast(v.value, dtype=k.dtype)
else:
v = backend.convert_to_tensor(v, dtype=k.dtype)
if k.shape != v.shape:
raise ValueError(
"Invalid variable value in StatelessScope: "
"all values in argument `mapping` must be tensors with "
"a shape that matches the corresponding variable shape. "
f"For variable {k}, received invalid value {v} with shape "
f"{v.shape}."
)
self.state_mapping[id(k)] = v
def __enter__(self):
self.original_scope = get_stateless_scope()
global_state.set_global_attribute("stateless_scope", self)
return self
def add_loss(self, loss):
self.losses.append(loss)
def add_update(self, update):
variable, value = update
self.state_mapping[id(variable)] = value
def get_current_value(self, variable):
return self.state_mapping.get(id(variable), None)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"stateless_scope", self.original_scope
)
if self.original_scope is None and self.initialize_variables:
# We're back in eager scope;
# if any variables were created within the stateless
# scope, we initialize them here.
from keras.src.backend.common.variables import (
initialize_all_variables,
)
initialize_all_variables()
def in_stateless_scope():
return global_state.get_global_attribute("stateless_scope") is not None
def get_stateless_scope():
return global_state.get_global_attribute("stateless_scope")
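# A hedged, runnable sketch (illustrative, not part of Keras itself): record an
# update for a variable inside a StatelessScope and read it back after exiting.
# Nothing below runs on import.
if __name__ == "__main__":
    import keras
    from keras import ops

    v = keras.Variable(initializer="zeros", shape=(3,), name="v")
    with keras.StatelessScope(state_mapping=[(v, v.value)]) as scope:
        # No in-place mutation happens here; the new value is only recorded.
        scope.add_update((v, ops.add(scope.get_current_value(v), 1.0)))
    print(v.value)                     # still zeros: the variable was never touched
    print(scope.get_current_value(v))  # ones, collected after scope exit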
|
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export("keras.StatelessScope")
class StatelessScope:
"""Scope to prevent any update to Keras Variables.
The values of variables to be used inside the scope
should be passed via the `state_mapping` argument, a
list of tuples `(k, v)` where `k` is a `KerasVariable`
and `v` is the intended value for this variable
(a backend tensor).
Updated values can be collected on scope exit via
`value = scope.get_current_value(variable)`. No updates
will be applied in-place to any variables for the duration
of the scope.
Example:
```python
state_mapping = [(k, ops.ones(k.shape, k.dtype)) for k in model.weights]
with keras.StatelessScope(state_mapping) as scope:
outputs = model.some_function(inputs)
# All model variables remain unchanged. Their new values can be
# collected via:
for k in model.weights:
new_value = scope.get_current_value(k)
print(f"New value for {k}: {new_value})
```
"""
def __init__(
self,
state_mapping=None,
collect_losses=False,
initialize_variables=True,
):
from keras.src import backend
from keras.src.backend.common.variables import KerasVariable
self.collect_losses = collect_losses
self.initialize_variables = initialize_variables
self.losses = []
self.state_mapping = {}
state_mapping = state_mapping or {}
for k, v in state_mapping:
if not isinstance(k, KerasVariable):
raise ValueError(
"Invalid reference variable in StatelessScope: "
"all keys in argument `mapping` must be KerasVariable "
f"instances. Received instead: {k}"
)
if isinstance(v, KerasVariable):
v = backend.cast(v.value, dtype=k.dtype)
else:
v = backend.convert_to_tensor(v, dtype=k.dtype)
if k.shape != v.shape:
raise ValueError(
"Invalid variable value in StatelessScope: "
"all values in argument `mapping` must be tensors with "
"a shape that matches the corresponding variable shape. "
f"For variable {k}, received invalid value {v} with shape "
f"{v.shape}."
)
self.state_mapping[id(k)] = v
def __enter__(self):
self.original_scope = get_stateless_scope()
global_state.set_global_attribute("stateless_scope", self)
return self
def add_loss(self, loss):
self.losses.append(loss)
def add_update(self, update):
variable, value = update
self.state_mapping[id(variable)] = value
def get_current_value(self, variable):
return self.state_mapping.get(id(variable), None)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"stateless_scope", self.original_scope
)
if self.original_scope is None and self.initialize_variables:
# We're back in eager scope;
# if any variables were created within the stateless
# scope, we initialize them here.
from keras.src.backend.common.variables import (
initialize_all_variables,
)
initialize_all_variables()
def in_stateless_scope():
return global_state.get_global_attribute("stateless_scope") is not None
def get_stateless_scope():
return global_state.get_global_attribute("stateless_scope")
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import logging
import os.path as osp
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Defaults to True
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print_log(
f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ",
logger='current')
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print_log(
f'Finish registry analysis, got: {scan_data}', logger='current')
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print_log(f'Result has been saved to {json_path}', logger='current')
return scan_data
def init_default_scope(scope: str) -> None:
"""Initialize the given default scope.
Args:
scope (str): The name of the default scope.
"""
never_created = DefaultScope.get_current_instance(
) is None or not DefaultScope.check_instance_created(scope)
if never_created:
DefaultScope.get_instance(scope, scope_name=scope)
return
current_scope = DefaultScope.get_current_instance() # type: ignore
if current_scope.scope_name != scope: # type: ignore
print_log(
'The current default scope ' # type: ignore
f'"{current_scope.scope_name}" is not "{scope}", ' # type: ignore
            '`init_default_scope` will force set the current '
f'default scope to "{scope}".',
logger='current',
level=logging.WARNING)
# avoid name conflict
new_instance_name = f'{scope}-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name=scope)
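# A hedged usage sketch (illustrative): the scope name and arguments below are
# examples only, and nothing here runs on import.
if __name__ == '__main__':
    init_default_scope('mmengine')  # register 'mmengine' as the default scope
    stats = count_registered_modules(save_path=None, verbose=False)
    print(stats['scan_date'], sorted(stats['registries'])[:3])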
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import logging
import os.path as osp
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Defaults to True
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print_log(
f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ",
logger='current')
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print_log(
f'Finish registry analysis, got: {scan_data}', logger='current')
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print_log(f'Result has been saved to {json_path}', logger='current')
return scan_data
def init_default_scope(scope: str) -> None:
"""Initialize the given default scope.
Args:
scope (str): The name of the default scope.
"""
never_created = DefaultScope.get_current_instance(
) is None or not DefaultScope.check_instance_created(scope)
if never_created:
DefaultScope.get_instance(scope, scope_name=scope)
return
current_scope = DefaultScope.get_current_instance() # type: ignore
if current_scope.scope_name != scope: # type: ignore
print_log(
'The current default scope ' # type: ignore
f'"{current_scope.scope_name}" is not "{scope}", '
            '`init_default_scope` will force set the current '
f'default scope to "{scope}".',
logger='current',
level=logging.WARNING)
# avoid name conflict
new_instance_name = f'{scope}-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name=scope)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import (register_all_modules, setup_cache_size_limit_of_dynamo,
setup_multi_processes)
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg',
'setup_cache_size_limit_of_dynamo'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg'
]
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
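# Illustrative note (not part of the original config): for depths = [2, 2, 6, 2]
# the two comprehensions above expand into per-block norm keys such as
# 'backbone.stages.0.blocks.0.norm', 'backbone.stages.0.blocks.1.norm', ...,
# 'backbone.stages.2.blocks.5.norm', 'backbone.stages.3.blocks.1.norm',
# plus 'backbone.stages.{0,1,2}.downsample.norm', all mapped to
# lr_mult=0.1, decay_mult=0.0 so every Swin norm layer follows the backbone
# learning-rate multiplier without weight decay.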
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
import json
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
try:
import matplotlib
matplotlib.use('Agg')
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(),
tm.no_graphviz()))
class TestPlotting:
def test_plotting(self):
m, _ = tm.load_agaricus(__file__)
booster = xgb.train({'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}, m,
num_boost_round=2)
ax = xgb.plot_importance(booster)
assert isinstance(ax, Axes)
assert ax.get_title() == 'Feature importance'
assert ax.get_xlabel() == 'Importance score'
assert ax.get_ylabel() == 'Features'
assert len(ax.patches) == 4
ax = xgb.plot_importance(booster, color='r',
title='t', xlabel='x', ylabel='y')
assert isinstance(ax, Axes)
assert ax.get_title() == 't'
assert ax.get_xlabel() == 'x'
assert ax.get_ylabel() == 'y'
assert len(ax.patches) == 4
for p in ax.patches:
assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red
ax = xgb.plot_importance(booster, color=['r', 'r', 'b', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax, Axes)
assert ax.get_title() == ''
assert ax.get_xlabel() == ''
assert ax.get_ylabel() == ''
assert len(ax.patches) == 4
assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue
assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue
g = xgb.to_graphviz(booster, num_trees=0)
assert isinstance(g, Source)
ax = xgb.plot_tree(booster, num_trees=0)
assert isinstance(ax, Axes)
def test_importance_plot_lim(self):
np.random.seed(1)
dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50)
bst = xgb.train({}, dm)
assert len(bst.get_fscore()) == 71
ax = xgb.plot_importance(bst)
assert ax.get_xlim() == (0., 11.)
assert ax.get_ylim() == (-1., 71.)
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71))
assert ax.get_xlim() == (0., 5.)
assert ax.get_ylim() == (10., 71.)
def run_categorical(self, tree_method: str) -> None:
X, y = tm.make_categorical(1000, 31, 19, onehot=False)
reg = xgb.XGBRegressor(
enable_categorical=True, n_estimators=10, tree_method=tree_method
)
reg.fit(X, y)
trees = reg.get_booster().get_dump(dump_format="json")
for tree in trees:
j_tree = json.loads(tree)
assert "leaf" in j_tree.keys() or isinstance(
j_tree["split_condition"], list
)
graph = xgb.to_graphviz(reg, num_trees=len(j_tree) - 1)
assert isinstance(graph, Source)
ax = xgb.plot_tree(reg, num_trees=len(j_tree) - 1)
assert isinstance(ax, Axes)
@pytest.mark.skipif(**tm.no_pandas())
def test_categorical(self) -> None:
self.run_categorical("approx")
|
import json
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
try:
import matplotlib
matplotlib.use('Agg')
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(),
tm.no_graphviz()))
class TestPlotting:
def test_plotting(self):
m, _ = tm.load_agaricus(__file__)
booster = xgb.train({'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}, m,
num_boost_round=2)
ax = xgb.plot_importance(booster)
assert isinstance(ax, Axes)
assert ax.get_title() == 'Feature importance'
assert ax.get_xlabel() == 'F score'
assert ax.get_ylabel() == 'Features'
assert len(ax.patches) == 4
ax = xgb.plot_importance(booster, color='r',
title='t', xlabel='x', ylabel='y')
assert isinstance(ax, Axes)
assert ax.get_title() == 't'
assert ax.get_xlabel() == 'x'
assert ax.get_ylabel() == 'y'
assert len(ax.patches) == 4
for p in ax.patches:
assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red
ax = xgb.plot_importance(booster, color=['r', 'r', 'b', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax, Axes)
assert ax.get_title() == ''
assert ax.get_xlabel() == ''
assert ax.get_ylabel() == ''
assert len(ax.patches) == 4
assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue
assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue
g = xgb.to_graphviz(booster, num_trees=0)
assert isinstance(g, Source)
ax = xgb.plot_tree(booster, num_trees=0)
assert isinstance(ax, Axes)
def test_importance_plot_lim(self):
np.random.seed(1)
dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50)
bst = xgb.train({}, dm)
assert len(bst.get_fscore()) == 71
ax = xgb.plot_importance(bst)
assert ax.get_xlim() == (0., 11.)
assert ax.get_ylim() == (-1., 71.)
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71))
assert ax.get_xlim() == (0., 5.)
assert ax.get_ylim() == (10., 71.)
def run_categorical(self, tree_method: str) -> None:
X, y = tm.make_categorical(1000, 31, 19, onehot=False)
reg = xgb.XGBRegressor(
enable_categorical=True, n_estimators=10, tree_method=tree_method
)
reg.fit(X, y)
trees = reg.get_booster().get_dump(dump_format="json")
for tree in trees:
j_tree = json.loads(tree)
assert "leaf" in j_tree.keys() or isinstance(
j_tree["split_condition"], list
)
graph = xgb.to_graphviz(reg, num_trees=len(j_tree) - 1)
assert isinstance(graph, Source)
ax = xgb.plot_tree(reg, num_trees=len(j_tree) - 1)
assert isinstance(ax, Axes)
@pytest.mark.skipif(**tm.no_pandas())
def test_categorical(self) -> None:
self.run_categorical("approx")
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
class InnerDoc(BaseDoc):
integer: int
inner_list: List
class MMDoc(BaseDoc):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[ImageDoc] = None
matches: Optional[DocArray] = None
matches_with_same_id: Optional[DocArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocArray[MMDoc](
[MMDoc(id='a', matches=DocArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocArray[MMDoc](
[MMDoc(id='a', matches=DocArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_update_complex(doc1, doc2):
doc1.update(doc2)
# doc1 is changed in place (no extra memory)
assert doc1.text == 'hey here 2'
assert doc1.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(doc1.matches) == 2
assert doc1.opt_int == 5
assert doc1.price == 5
assert doc1.test_set == {'a', 'b'}
assert len(doc1.matches_with_same_id) == 1
assert len(doc1.matches_with_same_id[0].matches) == 2
assert doc1.inner_doc.integer == 3
assert doc1.inner_doc.inner_list == ['c', 'd', 'a', 'b']
assert doc1.test_dict == {'a': 10, 'b': 10, 'c': 3, 'd': 4, 'z': None}
def test_update_simple():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
my_doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
my_doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
my_doc1.update(my_doc2)
assert my_doc1.content == 'Core content updated'
assert my_doc1.title == 'Title'
assert my_doc1.tags_ == ['python', 'AI', 'docarray']
def test_update_different_schema_fails():
class DocA(BaseDoc):
content: str
class DocB(BaseDoc):
image: Optional[ImageDoc] = None
docA = DocA(content='haha')
docB = DocB()
with pytest.raises(Exception):
docA.update(docB)
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[ImageDoc] = None
matches: Optional[DocumentArray] = None
matches_with_same_id: Optional[DocumentArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_update_complex(doc1, doc2):
doc1.update(doc2)
# doc1 is changed in place (no extra memory)
assert doc1.text == 'hey here 2'
assert doc1.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(doc1.matches) == 2
assert doc1.opt_int == 5
assert doc1.price == 5
assert doc1.test_set == {'a', 'b'}
assert len(doc1.matches_with_same_id) == 1
assert len(doc1.matches_with_same_id[0].matches) == 2
assert doc1.inner_doc.integer == 3
assert doc1.inner_doc.inner_list == ['c', 'd', 'a', 'b']
assert doc1.test_dict == {'a': 10, 'b': 10, 'c': 3, 'd': 4, 'z': None}
def test_update_simple():
class MyDocument(BaseDocument):
content: str
title: Optional[str] = None
tags_: List
my_doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
my_doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
my_doc1.update(my_doc2)
assert my_doc1.content == 'Core content updated'
assert my_doc1.title == 'Title'
assert my_doc1.tags_ == ['python', 'AI', 'docarray']
def test_update_different_schema_fails():
class DocA(BaseDocument):
content: str
class DocB(BaseDocument):
image: Optional[ImageDoc] = None
docA = DocA(content='haha')
docB = DocB()
with pytest.raises(Exception):
docA.update(docB)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
"""
_PROTO_FIELD_NAME = 'video_torch_tensor'
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import BlockchainDocumentLoader
from langchain_community.document_loaders.blockchain import BlockchainType
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BlockchainType": "langchain_community.document_loaders.blockchain",
"BlockchainDocumentLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BlockchainDocumentLoader",
"BlockchainType",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import BlockchainDocumentLoader
from langchain_community.document_loaders.blockchain import BlockchainType
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BlockchainType": "langchain_community.document_loaders.blockchain",
"BlockchainDocumentLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BlockchainType",
"BlockchainDocumentLoader",
]
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
|
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
class ContextualCompressionRetriever(BaseRetriever):
"""Retriever that wraps a base retriever and compresses the results."""
base_compressor: BaseDocumentCompressor
"""Compressor for compressing retrieved documents."""
base_retriever: RetrieverLike
"""Base Retriever to use for getting relevant documents."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.invoke(
query,
config={"callbacks": run_manager.get_child()},
**kwargs,
)
if docs:
compressed_docs = self.base_compressor.compress_documents(
docs,
query,
callbacks=run_manager.get_child(),
)
return list(compressed_docs)
return []
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
docs = await self.base_retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child()},
**kwargs,
)
if docs:
compressed_docs = await self.base_compressor.acompress_documents(
docs,
query,
callbacks=run_manager.get_child(),
)
return list(compressed_docs)
return []
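# A hedged, self-contained sketch (illustrative only): a toy retriever and a
# pass-through compressor wired into the class above. The two stand-in classes
# are hypothetical and exist only for this example; nothing below runs on import.
if __name__ == "__main__":

    class _EchoRetriever(BaseRetriever):
        """Toy retriever that returns one canned document for any query."""

        def _get_relevant_documents(
            self, query: str, *, run_manager: CallbackManagerForRetrieverRun
        ) -> list[Document]:
            return [Document(page_content=f"doc about {query}")]

    class _PassThroughCompressor(BaseDocumentCompressor):
        """Compressor that returns the retrieved documents unchanged."""

        def compress_documents(self, documents, query, callbacks=None):
            return documents

    retriever = ContextualCompressionRetriever(
        base_compressor=_PassThroughCompressor(),
        base_retriever=_EchoRetriever(),
    )
    print(retriever.invoke("contextual compression"))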
|
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
class ContextualCompressionRetriever(BaseRetriever):
"""Retriever that wraps a base retriever and compresses the results."""
base_compressor: BaseDocumentCompressor
"""Compressor for compressing retrieved documents."""
base_retriever: RetrieverLike
"""Base Retriever to use for getting relevant documents."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.invoke(
query, config={"callbacks": run_manager.get_child()}, **kwargs
)
if docs:
compressed_docs = self.base_compressor.compress_documents(
docs, query, callbacks=run_manager.get_child()
)
return list(compressed_docs)
return []
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
docs = await self.base_retriever.ainvoke(
query, config={"callbacks": run_manager.get_child()}, **kwargs
)
if docs:
compressed_docs = await self.base_compressor.acompress_documents(
docs, query, callbacks=run_manager.get_child()
)
return list(compressed_docs)
return []
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float | None = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero (active) elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero (active) elements in the embeddings.
If specified, only embeddings with more than this number of non-zero (active) elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero (active) elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero (active) elements in the embeddings.
If specified, only embeddings with more than this number of non-zero (active) elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import AspectRatioBatchSampler, ClassAwareSampler
from .utils import get_loading_pipeline, replace_ImageToTensor
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
'CocoPanopticDataset', 'MultiImageMixDataset', 'OpenImagesDataset',
'OpenImagesChallengeDataset', 'AspectRatioBatchSampler',
'ClassAwareSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
MultiImageMixDataset, RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import AspectRatioBatchSampler, ClassAwareSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'ConcatDataset', 'RepeatDataset', 'ClassBalancedDataset',
'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset',
'replace_ImageToTensor', 'get_loading_pipeline', 'NumClassCheckHook',
'CocoPanopticDataset', 'MultiImageMixDataset', 'OpenImagesDataset',
'OpenImagesChallengeDataset', 'AspectRatioBatchSampler',
'ClassAwareSampler'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.losses import Reduction as Reduction
from keras.src.losses import deserialize as deserialize
from keras.src.losses import get as get
from keras.src.losses import serialize as serialize
from keras.src.losses.loss import Loss as Loss
from keras.src.losses.losses import CTC as CTC
from keras.src.losses.losses import BinaryCrossentropy as BinaryCrossentropy
from keras.src.losses.losses import (
BinaryFocalCrossentropy as BinaryFocalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalCrossentropy as CategoricalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalFocalCrossentropy as CategoricalFocalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalGeneralizedCrossEntropy as CategoricalGeneralizedCrossEntropy,
)
from keras.src.losses.losses import CategoricalHinge as CategoricalHinge
from keras.src.losses.losses import Circle as Circle
from keras.src.losses.losses import CosineSimilarity as CosineSimilarity
from keras.src.losses.losses import Dice as Dice
from keras.src.losses.losses import Hinge as Hinge
from keras.src.losses.losses import Huber as Huber
from keras.src.losses.losses import KLDivergence as KLDivergence
from keras.src.losses.losses import LogCosh as LogCosh
from keras.src.losses.losses import MeanAbsoluteError as MeanAbsoluteError
from keras.src.losses.losses import (
MeanAbsolutePercentageError as MeanAbsolutePercentageError,
)
from keras.src.losses.losses import MeanSquaredError as MeanSquaredError
from keras.src.losses.losses import (
MeanSquaredLogarithmicError as MeanSquaredLogarithmicError,
)
from keras.src.losses.losses import Poisson as Poisson
from keras.src.losses.losses import (
SparseCategoricalCrossentropy as SparseCategoricalCrossentropy,
)
from keras.src.losses.losses import SquaredHinge as SquaredHinge
from keras.src.losses.losses import Tversky as Tversky
from keras.src.losses.losses import binary_crossentropy as binary_crossentropy
from keras.src.losses.losses import (
binary_focal_crossentropy as binary_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.losses.losses import (
categorical_focal_crossentropy as categorical_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_generalized_cross_entropy as categorical_generalized_cross_entropy,
)
from keras.src.losses.losses import categorical_hinge as categorical_hinge
from keras.src.losses.losses import circle as circle
from keras.src.losses.losses import cosine_similarity as cosine_similarity
from keras.src.losses.losses import ctc as ctc
from keras.src.losses.losses import dice as dice
from keras.src.losses.losses import hinge as hinge
from keras.src.losses.losses import huber as huber
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
from keras.src.losses.losses import poisson as poisson
from keras.src.losses.losses import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.losses.losses import squared_hinge as squared_hinge
from keras.src.losses.losses import tversky as tversky
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.losses import Reduction
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
from keras.src.losses.losses import CTC
from keras.src.losses.losses import BinaryCrossentropy
from keras.src.losses.losses import BinaryFocalCrossentropy
from keras.src.losses.losses import CategoricalCrossentropy
from keras.src.losses.losses import CategoricalFocalCrossentropy
from keras.src.losses.losses import CategoricalGeneralizedCrossEntropy
from keras.src.losses.losses import CategoricalHinge
from keras.src.losses.losses import Circle
from keras.src.losses.losses import CosineSimilarity
from keras.src.losses.losses import Dice
from keras.src.losses.losses import Hinge
from keras.src.losses.losses import Huber
from keras.src.losses.losses import KLDivergence
from keras.src.losses.losses import LogCosh
from keras.src.losses.losses import MeanAbsoluteError
from keras.src.losses.losses import MeanAbsolutePercentageError
from keras.src.losses.losses import MeanSquaredError
from keras.src.losses.losses import MeanSquaredLogarithmicError
from keras.src.losses.losses import Poisson
from keras.src.losses.losses import SparseCategoricalCrossentropy
from keras.src.losses.losses import SquaredHinge
from keras.src.losses.losses import Tversky
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_generalized_cross_entropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import circle
from keras.src.losses.losses import cosine_similarity
from keras.src.losses.losses import ctc
from keras.src.losses.losses import dice
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.losses.losses import tversky
|
from typing import Any
from unittest.mock import Mock, patch
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain.runnables.hub import HubRunnable
@patch("langchain.hub.pull")
def test_hub_runnable(mock_pull: Mock) -> None:
mock_pull.return_value = ChatPromptTemplate.from_messages(
[
("system", "a"),
("user", "b"),
],
)
basic: HubRunnable = HubRunnable("efriis/my-prompt")
bound = basic.bound
assert isinstance(bound, ChatPromptTemplate)
assert len(bound.messages) == 2
repo_dict = {
"efriis/my-prompt-1": ChatPromptTemplate.from_messages(
[
("system", "a"),
("user", "1"),
],
),
"efriis/my-prompt-2": ChatPromptTemplate.from_messages(
[
("system", "a"),
("user", "2"),
],
),
}
def repo_lookup(owner_repo_commit: str, **kwargs: Any) -> ChatPromptTemplate:
return repo_dict[owner_repo_commit]
@patch("langchain.hub.pull")
def test_hub_runnable_configurable_alternative(mock_pull: Mock) -> None:
mock_pull.side_effect = repo_lookup
original: HubRunnable = HubRunnable("efriis/my-prompt-1")
obj_a1 = original.configurable_alternatives(
ConfigurableField(id="owner_repo_commit", name="Hub ID"),
default_key="a1",
a2=HubRunnable("efriis/my-prompt-2"),
)
obj_a2 = obj_a1.with_config(configurable={"owner_repo_commit": "a2"})
templated = obj_a1.invoke({})
message_a1 = templated.messages[1]
assert message_a1.content == "1"
templated_2 = obj_a2.invoke({})
message_a2 = templated_2.messages[1]
assert message_a2.content == "2"
@patch("langchain.hub.pull")
def test_hub_runnable_configurable_fields(mock_pull: Mock) -> None:
mock_pull.side_effect = repo_lookup
original: HubRunnable = HubRunnable("efriis/my-prompt-1")
obj_configurable = original.configurable_fields(
owner_repo_commit=ConfigurableField(id="owner_repo_commit", name="Hub ID"),
)
templated_1 = obj_configurable.invoke({})
assert templated_1.messages[1].content == "1"
templated_2 = obj_configurable.with_config(
configurable={"owner_repo_commit": "efriis/my-prompt-2"},
).invoke({})
assert templated_2.messages[1].content == "2"
|
from typing import Any
from unittest.mock import Mock, patch
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain.runnables.hub import HubRunnable
@patch("langchain.hub.pull")
def test_hub_runnable(mock_pull: Mock) -> None:
mock_pull.return_value = ChatPromptTemplate.from_messages(
[
("system", "a"),
("user", "b"),
]
)
basic: HubRunnable = HubRunnable("efriis/my-prompt")
bound = basic.bound
assert isinstance(bound, ChatPromptTemplate)
assert len(bound.messages) == 2
repo_dict = {
"efriis/my-prompt-1": ChatPromptTemplate.from_messages(
[
("system", "a"),
("user", "1"),
]
),
"efriis/my-prompt-2": ChatPromptTemplate.from_messages(
[
("system", "a"),
("user", "2"),
]
),
}
def repo_lookup(owner_repo_commit: str, **kwargs: Any) -> ChatPromptTemplate:
return repo_dict[owner_repo_commit]
@patch("langchain.hub.pull")
def test_hub_runnable_configurable_alternative(mock_pull: Mock) -> None:
mock_pull.side_effect = repo_lookup
original: HubRunnable = HubRunnable("efriis/my-prompt-1")
obj_a1 = original.configurable_alternatives(
ConfigurableField(id="owner_repo_commit", name="Hub ID"),
default_key="a1",
a2=HubRunnable("efriis/my-prompt-2"),
)
obj_a2 = obj_a1.with_config(configurable={"owner_repo_commit": "a2"})
templated = obj_a1.invoke({})
message_a1 = templated.messages[1]
assert message_a1.content == "1"
templated_2 = obj_a2.invoke({})
message_a2 = templated_2.messages[1]
assert message_a2.content == "2"
@patch("langchain.hub.pull")
def test_hub_runnable_configurable_fields(mock_pull: Mock) -> None:
mock_pull.side_effect = repo_lookup
original: HubRunnable = HubRunnable("efriis/my-prompt-1")
obj_configurable = original.configurable_fields(
owner_repo_commit=ConfigurableField(id="owner_repo_commit", name="Hub ID"),
)
templated_1 = obj_configurable.invoke({})
assert templated_1.messages[1].content == "1"
templated_2 = obj_configurable.with_config(
configurable={"owner_repo_commit": "efriis/my-prompt-2"}
).invoke({})
assert templated_2.messages[1].content == "2"
|
from __future__ import annotations
from dataclasses import field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "score"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# TODO:
# if tuple(column_names) not in self._warned_columns:
# self.maybe_warn_about_column_order(column_names)
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
batch[column_name] = [row[column_name] for row in features]
return batch
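# Usage sketch (added for illustration; not part of the original module). A
# hedged example of what the collator returns for a toy feature list: the
# "label" column becomes a tensor, every other column stays a plain per-row
# list. The dummy tokenize_fn and the feature dicts are assumptions for
# illustration only; in real training the trainer wires in the model tokenizer.
if __name__ == "__main__":
    collator = CrossEncoderDataCollator(tokenize_fn=lambda texts: texts)
    features = [
        {"query": "how to bake bread", "answer": "use yeast", "label": 1.0},
        {"query": "capital of France", "answer": "Paris", "label": 0.0},
    ]
    batch = collator(features)
    print(batch["label"])  # tensor([1., 0.])
    print(batch["query"])  # ['how to bake bread', 'capital of France']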
|
from __future__ import annotations
from dataclasses import field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "score"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# TODO:
# if tuple(column_names) not in self._warned_columns:
# self.maybe_warn_about_column_order(column_names)
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
# tokenized = self.tokenize_fn([row[column_name] for row in features])
# for key, value in tokenized.items():
# batch[f"{column_name}_{key}"] = value
batch[column_name] = [row[column_name] for row in features]
return batch
|
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_1.6gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
neck=dict(
type='FPN',
in_channels=[72, 168, 408, 912],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_1.6gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
neck=dict(
type='FPN',
in_channels=[72, 168, 408, 912],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
|
from typing import TYPE_CHECKING, Type
if TYPE_CHECKING:
from pandas import DataFrame
from docarray.typing import T
class DataframeIOMixin:
"""Save/load from :class:`pandas.dataframe`
.. note::
These functions require you to install `pandas`
"""
def to_dataframe(self, **kwargs) -> 'DataFrame':
"""Export itself to a :class:`pandas.DataFrame` object.
:param kwargs: the extra kwargs will be passed to :meth:`pandas.DataFrame.from_dict`.
:return: a :class:`pandas.DataFrame` object
"""
from pandas import DataFrame
return DataFrame.from_dict(self.to_list(), **kwargs)
@classmethod
def from_dataframe(cls: Type['T'], df: 'DataFrame', *args, **kwargs) -> 'T':
"""Import a :class:`DocumentArray` from a :class:`pandas.DataFrame` object.
:param df: a :class:`pandas.DataFrame` object.
:return: a :class:`DocumentArray` object
"""
da = cls(**kwargs)
from docarray import Document
for m in df.to_dict(orient='records'):
# drop nan
da.append(
Document(
{k: v for k, v in m.items() if (not isinstance(v, float) or v == v)}
)
)
return da
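# Usage sketch (added for illustration; not part of the original module). A
# hedged round-trip example, assuming the docarray v1-style DocumentArray picks
# up this mixin and that pandas is installed; the texts below are made up.
if __name__ == "__main__":
    from docarray import Document, DocumentArray

    da = DocumentArray([Document(text='hello'), Document(text='world')])
    df = da.to_dataframe()  # one row per Document, one column per field
    da_roundtrip = DocumentArray.from_dataframe(df)
    assert da_roundtrip[0].text == 'hello'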
|
from typing import TYPE_CHECKING, Type
if TYPE_CHECKING:
from pandas import DataFrame
from ....typing import T
class DataframeIOMixin:
"""Save/load from :class:`pandas.dataframe`
.. note::
These functions require you to install `pandas`
"""
def to_dataframe(self, **kwargs) -> 'DataFrame':
"""Export itself to a :class:`pandas.DataFrame` object.
:param kwargs: the extra kwargs will be passed to :meth:`pandas.DataFrame.from_dict`.
:return: a :class:`pandas.DataFrame` object
"""
from pandas import DataFrame
return DataFrame.from_dict(self.to_list(), **kwargs)
@classmethod
def from_dataframe(cls: Type['T'], df: 'DataFrame', *args, **kwargs) -> 'T':
"""Import a :class:`DocumentArray` from a :class:`pandas.DataFrame` object.
:param df: a :class:`pandas.DataFrame` object.
:return: a :class:`DocumentArray` object
"""
da = cls(**kwargs)
from .... import Document
for m in df.to_dict(orient='records'):
# drop nan
da.append(
Document(
{k: v for k, v in m.items() if (not isinstance(v, float) or v == v)}
)
)
return da
|
__version__ = "3.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
__version__ = "2.2.2"
__MODEL_HUB_ORGANIZATION__ = 'sentence-transformers'
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
|
import re
from typing import TYPE_CHECKING, Any, Dict, Union
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Union[float, Dict[str, float]]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: Dict[str, float], name: str):
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: "SentenceTransformer", metrics: Dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:
            pass
        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
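# Subclassing sketch (added for illustration; not part of the original module).
# A hedged, minimal custom evaluator showing how greater_is_better,
# primary_metric and prefix_name_to_metrics fit together. The toy pairs/labels
# and the "agreement" metric are assumptions for illustration; real evaluators
# in this package compute proper metrics and can also write CSVs to output_path.
class ToyCosineEvaluator(SentenceEvaluator):
    def __init__(self, pairs, labels, name: str = "toy"):
        super().__init__()
        self.pairs = pairs
        self.labels = labels
        self.name = name
        self.primary_metric = "agreement"

    def __call__(self, model, output_path=None, epoch=-1, steps=-1):
        import torch

        emb_a = model.encode([a for a, _ in self.pairs], convert_to_tensor=True)
        emb_b = model.encode([b for _, b in self.pairs], convert_to_tensor=True)
        scores = torch.nn.functional.cosine_similarity(emb_a, emb_b)
        gold = torch.tensor(self.labels, device=scores.device, dtype=scores.dtype)
        # crude agreement score in [0, 1]: 1 minus the mean absolute error
        metrics = {"agreement": float(1.0 - (scores - gold).abs().mean())}
        metrics = self.prefix_name_to_metrics(metrics, self.name)
        self.store_metrics_in_model_card_data(model, metrics)
        return metrics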
|
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
:param model:
the model to evaluate
:param output_path:
path where predictions and metrics are written to
        :param epoch:
the epoch where the evaluation takes place.
This is used for the file prefixes.
If this is -1, then we assume evaluation on test data.
        :param steps:
the steps in the current epoch at time of the evaluation.
This is used for the file prefixes.
If this is -1, then we assume evaluation at the end of the epoch.
:return: a score for the evaluation with a higher score indicating a better result
"""
pass
|
"""Test in memory docstore."""
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from the LLM, and our next step is
to filter and read its format using regular expressions to identify specific fields,
such as:
- Action: Search
- Action Input: How to use this class?
- Additional Fields: "N/A"
To assist us in this task, we use the regex_dict class. This class allows us to send a
dictionary containing an output key and the expected format, which in turn enables us to
retrieve the result of the matching formats and extract specific information from it.
To exclude irrelevant information from our return dictionary, we can instruct the LLM to
use a specific command that notifies us when it doesn't know the answer. We call this
variable the "no_update_value", and for our current case, we set it to "N/A". Therefore,
we expect the result to only contain the following fields:
{
{key = action, value = search}
{key = action_input, value = "How to use this class?"}.
}"""
def test_regex_dict_result() -> None:
"""Test regex dict result."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
result_dict = regex_dict_parser.parse(DEF_README)
print("parse_result:", result_dict) # noqa: T201
assert DEF_EXPECTED_RESULT == result_dict
def test_regex_dict_output_type() -> None:
"""Test regex dict output type."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
assert regex_dict_parser.OutputType == dict[str, str]
|
"""Test in memory docstore."""
from typing import Dict
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from the LLM, and our next step is
to filter and read its format using regular expressions to identify specific fields,
such as:
- Action: Search
- Action Input: How to use this class?
- Additional Fields: "N/A"
To assist us in this task, we use the regex_dict class. This class allows us to send a
dictionary containing an output key and the expected format, which in turn enables us to
retrieve the result of the matching formats and extract specific information from it.
To exclude irrelevant information from our return dictionary, we can instruct the LLM to
use a specific command that notifies us when it doesn't know the answer. We call this
variable the "no_update_value", and for our current case, we set it to "N/A". Therefore,
we expect the result to only contain the following fields:
{
{key = action, value = search}
{key = action_input, value = "How to use this class?"}.
}"""
def test_regex_dict_result() -> None:
"""Test regex dict result."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
result_dict = regex_dict_parser.parse(DEF_README)
print("parse_result:", result_dict) # noqa: T201
assert DEF_EXPECTED_RESULT == result_dict
def test_regex_dict_output_type() -> None:
"""Test regex dict output type."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
assert regex_dict_parser.OutputType is Dict[str, str]
|
import numpy as np
def approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Args
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
(inds,) = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int64)
def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):
"""
Provides train/test indices to split data in train/test sets.
    Its implementation is adapted from the StratifiedShuffleSplit
    implementation in the scikit-learn library.
Args
----------
n_train : int,
represents the absolute number of train samples.
n_test : int,
represents the absolute number of test samples.
    rng : RandomState instance
        Controls the randomness of the training and testing indices produced.
        Pass a seeded RandomState for reproducible output across multiple function calls.
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
"""
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("Minimum class count error")
if n_train < n_classes:
raise ValueError(
"The train_size = %d should be greater or equal to the number of classes = %d" % (n_train, n_classes)
)
if n_test < n_classes:
raise ValueError(
"The test_size = %d should be greater or equal to the number of classes = %d" % (n_test, n_classes)
)
class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1])
for _ in range(n_splits):
n_i = approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i in range(n_classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
train.extend(perm_indices_class_i[: n_i[i]])
test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
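# Usage sketch (added for illustration; not part of the original module). A
# hedged example of driving the generator above with a small imbalanced label
# array; the class counts and split sizes are made up. Each yielded
# (train, test) pair is a pair of index arrays into y with roughly the same
# class proportions as y itself.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    y = np.array([0] * 6 + [1] * 4)  # 6 samples of class 0, 4 of class 1
    splits = stratified_shuffle_split_generate_indices(
        y, n_train=6, n_test=4, rng=rng, n_splits=2
    )
    for train_idx, test_idx in splits:
        print(np.bincount(y[train_idx]), np.bincount(y[test_idx]))  # e.g. [4 2] [2 2]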
|
import numpy as np
def approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Args
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
(inds,) = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int64)
def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):
"""
Provides train/test indices to split data in train/test sets.
    Its implementation is adapted from the StratifiedShuffleSplit
    implementation in the scikit-learn library.
Args
----------
n_train : int,
represents the absolute number of train samples.
n_test : int,
represents the absolute number of test samples.
    rng : RandomState instance
        Controls the randomness of the training and testing indices produced.
        Pass a seeded RandomState for reproducible output across multiple function calls.
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
"""
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("Minimum class count error")
if n_train < n_classes:
raise ValueError(
"The train_size = %d should be greater or " "equal to the number of classes = %d" % (n_train, n_classes)
)
if n_test < n_classes:
raise ValueError(
"The test_size = %d should be greater or " "equal to the number of classes = %d" % (n_test, n_classes)
)
class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1])
for _ in range(n_splits):
n_i = approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i in range(n_classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
train.extend(perm_indices_class_i[: n_i[i]])
test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for SigLIP."""
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
PILImageResampling,
)
from ...utils import auto_docstring
@auto_docstring
class SiglipImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
default_to_square = False
do_resize = True
do_rescale = True
do_normalize = True
__all__ = ["SiglipImageProcessorFast"]
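# Usage sketch (added for illustration; not part of the original module). A
# hedged example of calling the fast processor on a dummy image; it assumes
# torch, torchvision and PIL are available, as the fast image processors
# require. The dummy image size is arbitrary.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    processor = SiglipImageProcessorFast()
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    inputs = processor(images=image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])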
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for SigLIP."""
from ...image_processing_utils_fast import BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BaseImageProcessorFast
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
PILImageResampling,
)
from ...utils import add_start_docstrings
@add_start_docstrings(
"Constructs a fast SigLIP image processor.",
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
)
class SiglipImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
default_to_square = False
do_resize = True
do_rescale = True
do_normalize = True
__all__ = ["SiglipImageProcessorFast"]
|
_base_ = './dino-4scale_r50_8xb2-12e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
num_levels = 5
model = dict(
num_feature_levels=num_levels,
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=12,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
        # Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=True,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768, 1536], num_outs=num_levels),
encoder=dict(layer_cfg=dict(self_attn_cfg=dict(num_levels=num_levels))),
decoder=dict(layer_cfg=dict(cross_attn_cfg=dict(num_levels=num_levels))))
|
_base_ = './dino-4scale_r50_8xb2-12e_coco.py'
fp16 = dict(loss_scale=512.)
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
num_levels = 5
model = dict(
num_feature_levels=num_levels,
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=12,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
        # Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=True,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768, 1536], num_outs=num_levels),
encoder=dict(layer_cfg=dict(self_attn_cfg=dict(num_levels=num_levels))),
decoder=dict(layer_cfg=dict(cross_attn_cfg=dict(num_levels=num_levels))))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import MongoDBChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MongoDBChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MongoDBChatMessageHistory",
]
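# Pattern sketch (added for illustration; not part of the original module). A
# hedged, generic stand-in for the idea behind create_importer and the
# module-level __getattr__ hook (PEP 562): resolve a moved name lazily on first
# access and emit a DeprecationWarning. This is not the real create_importer
# implementation, just the shape of the mechanism.
def _example_lazy_lookup(name: str, lookup: dict) -> Any:
    import importlib
    import warnings

    if name in lookup:
        warnings.warn(
            f"{name} has moved to {lookup[name]}", DeprecationWarning, stacklevel=2
        )
        return getattr(importlib.import_module(lookup[name]), name)
    raise AttributeError(name)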
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import MongoDBChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MongoDBChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MongoDBChatMessageHistory",
]
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(shape))
assert tensor.__class__.__name__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__qualname__ == 'TorchTensor[3, 224, 224]'
assert f'{tensor[0][0][0]}' == 'TorchTensor[3, 224, 224](0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
|
from .checkpointer import Checkpoint, WorkflowCheckpointer
from .context import Context
from .context_serializers import JsonPickleSerializer, JsonSerializer
from .decorators import step
from .errors import WorkflowRuntimeError, WorkflowTimeoutError, WorkflowValidationError
from .events import Event, HumanResponseEvent, InputRequiredEvent, StartEvent, StopEvent
from .workflow import Workflow
__all__ = [
"Context",
"Event",
"StartEvent",
"StopEvent",
"Workflow",
"WorkflowRuntimeError",
"WorkflowTimeoutError",
"WorkflowValidationError",
"step",
"InputRequiredEvent",
"HumanResponseEvent",
"JsonPickleSerializer",
"JsonSerializer",
"WorkflowCheckpointer",
"Checkpoint",
]
|
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.drawing import (
draw_all_possible_flows,
draw_most_recent_execution,
)
from llama_index.core.workflow.errors import (
WorkflowRuntimeError,
WorkflowTimeoutError,
WorkflowValidationError,
)
from llama_index.core.workflow.events import (
Event,
StartEvent,
StopEvent,
InputRequiredEvent,
HumanResponseEvent,
)
from llama_index.core.workflow.workflow import Workflow
from llama_index.core.workflow.context_serializers import (
JsonPickleSerializer,
JsonSerializer,
)
from llama_index.core.workflow.checkpointer import (
Checkpoint,
WorkflowCheckpointer,
)
__all__ = [
    "Context",
    "Event",
    "StartEvent",
    "StopEvent",
    "Workflow",
    "WorkflowRuntimeError",
    "WorkflowTimeoutError",
    "WorkflowValidationError",
    "draw_all_possible_flows",
    "draw_most_recent_execution",
    "step",
    "InputRequiredEvent",
    "HumanResponseEvent",
    "JsonPickleSerializer",
    "JsonSerializer",
    "WorkflowCheckpointer",
    "Checkpoint",
]
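# Usage sketch (added for illustration; not part of the original module). A
# hedged, minimal workflow wired from the names imported above: one step that
# consumes the StartEvent and returns a StopEvent. The EchoWorkflow class and
# the "message" keyword are made up for illustration.
if __name__ == "__main__":
    import asyncio

    class EchoWorkflow(Workflow):
        @step
        async def echo(self, ev: StartEvent) -> StopEvent:
            return StopEvent(result=ev.message)

    async def _demo() -> None:
        result = await EchoWorkflow(timeout=10).run(message="hello workflows")
        print(result)  # "hello workflows"

    asyncio.run(_demo())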
|