input | output
---|---|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
import mmcv
from mmengine.config import Config
from mmengine.logging import MMLogger
from mmengine.utils import mkdir_or_exist
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--out-dir', default=None, help='Dir to output file')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s); 0 means blocking')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
def inference_model(config_name, checkpoint, visualizer, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
raise NotImplementedError()
model = init_detector(
cfg, checkpoint, palette=args.palette, device=args.device)
visualizer.dataset_meta = model.dataset_meta
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show or args.out_dir is not None:
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
out_file = None
if args.out_dir is not None:
out_dir = args.out_dir
mkdir_or_exist(out_dir)
out_file = osp.join(
out_dir,
config_name.split('/')[-1].replace('py', 'jpg'))
visualizer.add_datasample(
'result',
img,
data_sample=result,
draw_gt=False,
show=args.show,
wait_time=args.wait_time,
out_file=out_file,
pred_score_thr=args.score_thr)
return result
# Sample test whether the inference code is correct
def main(args):
# register all modules in mmdet into the registries
register_all_modules()
config = Config.fromfile(args.config)
# init visualizer
visualizer_cfg = dict(type='DetLocalVisualizer', name='visualizer')
visualizer = VISUALIZERS.build(visualizer_cfg)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args)
return
else:
raise RuntimeError('model name input error.')
# test all models
logger = MMLogger.get_instance(
name='MMLogger',
log_file='benchmark_test_image.log',
log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args,
logger)
except Exception as e:
logger.error(f'{config_name}: {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
import mmcv
from mmengine.config import Config
from mmengine.logging import MMLogger
from mmengine.utils import mkdir_or_exist
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--out-dir', default=None, help='Dir to output file')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s); 0 means blocking')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
def inference_model(config_name, checkpoint, visualizer, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
raise NotImplementedError()
model = init_detector(
cfg, checkpoint, palette=args.palette, device=args.device)
visualizer.dataset_meta = model.dataset_meta
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show or args.out_dir is not None:
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
out_file = None
if args.out_dir is not None:
out_dir = args.out_dir
mkdir_or_exist(out_dir)
out_file = osp.join(
out_dir,
config_name.split('/')[-1].replace('py', 'jpg'))
visualizer.add_datasample(
'result',
img,
pred_sample=result,
show=args.show,
wait_time=args.wait_time,
out_file=out_file,
pred_score_thr=args.score_thr)
return result
# Sample test whether the inference code is correct
def main(args):
# register all modules in mmdet into the registries
register_all_modules()
config = Config.fromfile(args.config)
# init visualizer
visualizer_cfg = dict(type='DetLocalVisualizer', name='visualizer')
visualizer = VISUALIZERS.build(visualizer_cfg)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args)
return
else:
raise RuntimeError('model name input error.')
# test all models
logger = MMLogger.get_instance(
name='MMLogger',
log_file='benchmark_test_image.log',
log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args,
logger)
except Exception as e:
logger.error(f'{config_name}: {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
|
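Both variants of the benchmark script above reduce to the same two-call MMDetection API. A minimal standalone sketch of that flow, with hypothetical config and checkpoint paths, could look like this:

```python
# Minimal sketch of the init_detector / inference_detector flow used above.
# The config and checkpoint paths are hypothetical placeholders.
from mmdet.apis import inference_detector, init_detector
from mmdet.utils import register_all_modules

register_all_modules()  # register mmdet modules into the mmengine registries
model = init_detector(
    'configs/retinanet/retinanet_r50_fpn_1x_coco.py',
    'checkpoints/retinanet_r50_fpn_1x_coco.pth',
    device='cpu')
result = inference_detector(model, 'demo/demo.jpg')  # returns a DetDataSample
print(result.pred_instances.scores)
```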
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import leaky_relu
class LeakyReLUTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_leaky_relu(self):
self.run_layer_test(
leaky_relu.LeakyReLU,
init_kwargs={
"negative_slope": 1,
},
input_shape=(2, 3, 4),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_leaky_relu_correctness(self):
leaky_relu_layer = leaky_relu.LeakyReLU(negative_slope=0.5)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
result = leaky_relu_layer(input)
self.assertAllClose(result, expected_output)
def test_invalid_usage(self):
with self.assertRaisesRegex(
ValueError,
"The negative_slope value of a Leaky ReLU layer cannot be None",
):
self.run_layer_test(
leaky_relu.LeakyReLU,
init_kwargs={"negative_slope": None},
input_shape=(2, 3, 4),
supports_masking=True,
)
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import leaky_relu
class LeakyReLUTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_leaky_relu(self):
self.run_layer_test(
leaky_relu.LeakyReLU,
init_kwargs={
"negative_slope": 1,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_leaky_relu_correctness(self):
leaky_relu_layer = leaky_relu.LeakyReLU(negative_slope=0.5)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
result = leaky_relu_layer(input)
self.assertAllClose(result, expected_output)
def test_invalid_usage(self):
with self.assertRaisesRegex(
ValueError,
"The negative_slope value of a Leaky ReLU layer cannot be None",
):
self.run_layer_test(
leaky_relu.LeakyReLU,
init_kwargs={"negative_slope": None},
input_shape=(2, 3, 4),
supports_masking=True,
)
|
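The correctness test above hard-codes the expected values; the rule it checks is simply `f(x) = x` for `x >= 0` and `f(x) = negative_slope * x` otherwise. A small NumPy sketch of that rule:

```python
# Reference implementation of the LeakyReLU rule checked by the tests above.
import numpy as np

def leaky_relu(x, negative_slope=0.5):
    return np.where(x >= 0, x, negative_slope * x)

print(leaky_relu(np.array([-10.0, -5.0, 0.0, 5.0, 10.0])))
# -> [-5.  -2.5  0.   5.  10. ]
```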
import numpy as np
import pytest
import torch
from docarray.base_doc import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl, NdArray, TorchTensor
@pytest.fixture()
def doc_and_class():
class Mmdoc(BaseDoc):
img: NdArray
url: AnyUrl
txt: str
torch_tensor: TorchTensor
bytes_: bytes
doc = Mmdoc(
img=np.zeros((10)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(10),
bytes_=b'hello',
)
return doc, Mmdoc
def test_to_json(doc_and_class):
doc, _ = doc_and_class
doc.json()
def test_from_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(doc.json())
for field, field2 in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
assert (getattr(doc, field) == getattr(doc, field2)).all()
else:
assert getattr(doc, field) == getattr(doc, field2)
def test_to_dict_to_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(orjson_dumps(doc.dict()))
for field, field2 in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
assert (getattr(doc, field) == getattr(doc, field2)).all()
else:
assert getattr(doc, field) == getattr(doc, field2)
|
import numpy as np
import pytest
import torch
from docarray.base_doc import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl, NdArray, TorchTensor
@pytest.fixture()
def doc_and_class():
class Mmdoc(BaseDoc):
img: NdArray
url: AnyUrl
txt: str
torch_tensor: TorchTensor
bytes_: bytes
doc = Mmdoc(
img=np.zeros((10)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(10),
bytes_=b'hello',
)
return doc, Mmdoc
def test_to_json(doc_and_class):
doc, _ = doc_and_class
doc.json()
def test_from_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(doc.json())
for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
assert (getattr(doc, field) == getattr(doc, field2)).all()
else:
assert getattr(doc, field) == getattr(doc, field2)
def test_to_dict_to_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(orjson_dumps(doc.dict()))
for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
assert (getattr(doc, field) == getattr(doc, field2)).all()
else:
assert getattr(doc, field) == getattr(doc, field2)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.8.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.7.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import warnings
from abc import ABC
from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
if TYPE_CHECKING:
from docarray.typing.bytes.audio_bytes import AudioBytes
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self) -> 'AudioBytes':
"""
Convert audio tensor to [`AudioBytes`][docarray.typing.AudioBytes].
"""
from docarray.typing.bytes.audio_bytes import AudioBytes
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return AudioBytes(tensor.tobytes())
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
:param file_path: path to an audio file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
pydub = import_library('pydub', raise_error=True) # noqa: F841
from pydub import AudioSegment
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
import warnings
from abc import ABC
from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
if TYPE_CHECKING:
from docarray.typing.bytes.audio_bytes import AudioBytes
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self) -> 'AudioBytes':
"""
Convert audio tensor to [`AudioBytes`][docarray.typing.AudioBytes].
"""
from docarray.typing.bytes.audio_bytes import AudioBytes
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return AudioBytes(tensor.tobytes())
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
:param file_path: path to an audio file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
pydub = import_library('pydub', raise_error=True) # noqa: F841
from pydub import AudioSegment
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
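`to_bytes()` above scales the float samples by `MAX_INT_16` and packs them as little-endian 16-bit integers. A short NumPy sketch of that conversion in isolation:

```python
# The float -> 16-bit PCM conversion performed by AbstractAudioTensor.to_bytes().
import numpy as np

MAX_INT_16 = 2**15
samples = np.array([0.0, 0.5, -0.5, 0.99], dtype=np.float32)  # assumed range [-1, 1]
pcm = (samples * MAX_INT_16).astype('<h')  # '<h' = little-endian int16
print(pcm)                 # [     0  16384 -16384  32440]
print(len(pcm.tobytes()))  # 8 bytes: 2 bytes per sample
```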
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTorchTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
# doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = doc_2.url.load().video
# doc_2.video_tensor.save(file_path='file_2.wav')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTorchTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
# doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = doc_2.url.load().video
# doc_2.video_tensor.save(file_path='file_2.wav')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
"""
Video audio parser.
Contains parsers for mp3, mp4 files.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional, cast
import logging
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class VideoAudioReader(BaseReader):
"""
Video audio parser.
Extract text from transcript of video/audio files.
"""
def __init__(self, *args: Any, model_version: str = "base", **kwargs: Any) -> None:
"""Init parser."""
super().__init__(*args, **kwargs)
self._model_version = model_version
try:
import whisper
except ImportError:
raise ImportError(
"Please install OpenAI whisper model "
"'pip install git+https://github.com/openai/whisper.git' "
"to use the model"
)
model = whisper.load_model(self._model_version)
self.parser_config = {"model": model}
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
import whisper
if file.name.endswith("mp4"):
try:
from pydub import AudioSegment
except ImportError:
raise ImportError("Please install pydub 'pip install pydub' ")
if fs:
with fs.open(file, "rb") as f:
video = AudioSegment.from_file(f, format="mp4")
else:
# open file
video = AudioSegment.from_file(file, format="mp4")
# Extract audio from video
audio = video.split_to_mono()[0]
file_str = str(file)[:-4] + ".mp3"
# export file
audio.export(file_str, format="mp3")
model = cast(whisper.Whisper, self.parser_config["model"])
result = model.transcribe(str(file))
transcript = result["text"]
return [Document(text=transcript, metadata=extra_info or {})]
|
"""Video audio parser.
Contains parsers for mp3, mp4 files.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional, cast
import logging
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class VideoAudioReader(BaseReader):
"""Video audio parser.
Extract text from transcript of video/audio files.
"""
def __init__(self, *args: Any, model_version: str = "base", **kwargs: Any) -> None:
"""Init parser."""
super().__init__(*args, **kwargs)
self._model_version = model_version
try:
import whisper
except ImportError:
raise ImportError(
"Please install OpenAI whisper model "
"'pip install git+https://github.com/openai/whisper.git' "
"to use the model"
)
model = whisper.load_model(self._model_version)
self.parser_config = {"model": model}
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
import whisper
if file.name.endswith("mp4"):
try:
from pydub import AudioSegment
except ImportError:
raise ImportError("Please install pydub 'pip install pydub' ")
if fs:
with fs.open(file, "rb") as f:
video = AudioSegment.from_file(f, format="mp4")
else:
# open file
video = AudioSegment.from_file(file, format="mp4")
# Extract audio from video
audio = video.split_to_mono()[0]
file_str = str(file)[:-4] + ".mp3"
# export file
audio.export(file_str, format="mp3")
model = cast(whisper.Whisper, self.parser_config["model"])
result = model.transcribe(str(file))
transcript = result["text"]
return [Document(text=transcript, metadata=extra_info or {})]
|
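Both reader variants above delegate the actual speech-to-text step to OpenAI Whisper. A minimal sketch of that call, assuming the optional `openai-whisper` package is installed and `audio.mp3` is a hypothetical input file:

```python
# Transcription call the VideoAudioReader wraps (assumes openai-whisper is installed).
import whisper

model = whisper.load_model("base")      # same default as model_version above
result = model.transcribe("audio.mp3")  # hypothetical audio file
print(result["text"])
```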
from __future__ import annotations
from .splade_callbacks import SchedulerType, SpladeWeightRegulizerSchedulerCallback
__all__ = ["SpladeWeightRegulizerSchedulerCallback", "SchedulerType"]
|
from __future__ import annotations
from .splade_callbacks import SchedulerType, SpladeLambdaSchedulerCallback
__all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
|
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py',
teacher_ckpt=teacher_ckpt,
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py',
teacher_ckpt=teacher_ckpt,
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
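Configs like the two variants above are never run directly; they are resolved through `mmengine.Config`, which merges the `_base_` file with the overrides. A sketch of loading one, with the file name assumed:

```python
# Sketch: resolving a derived MMDetection config with mmengine (file name assumed).
from mmengine.config import Config

cfg = Config.fromfile('configs/ld/ld_r101-gflv1-r101-dconv-c3-c5_fpn_2x_coco.py')
print(cfg.max_epochs)                               # 24
print(cfg.train_dataloader['dataset']['pipeline'])  # the multi-scale train_pipeline
```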
# CREDITS: https://github.com/openai/CLIP
import gzip
import html
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return str(Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
"""
Returns a dict mapping utf-8 bytes to corresponding unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
This also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
|
# CREDITS: https://github.com/openai/CLIP
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.getcwd(), '.cache', 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
"""
Returns a dict mapping utf-8 bytes to corresponding unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
This also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
|
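A typical round trip with the tokenizer defined above, assuming the gzipped BPE vocabulary exists at the path returned by `default_bpe()`:

```python
# Encode/decode round trip with the SimpleTokenizer class defined above
# (assumes the BPE vocab file exists at the default path).
tokenizer = SimpleTokenizer()
ids = tokenizer.encode("hello world")
print(ids)                    # BPE token ids
print(tokenizer.decode(ids))  # "hello world " (decode maps '</w>' to a trailing space)
```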
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
PKG2PROJECT = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction2': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
}
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' files defined in
`model-index.yml` of an external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
'`_get_package_and_cfg_path` is used for get external package, '
'please specify the package name and relative config path, just '
'like `mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
assert package in PKG2PROJECT, 'mmengine does not support to load ' \
f'{package} config.'
package = PKG2PROJECT[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove an Assign node if the target's name matches the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove an Assign node if the target's name matches the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
|
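The `RemoveAssignFromAST` transformer, present in both versions above, drops top-level assignments whose target name equals the key. A small usage sketch:

```python
# Dropping the assignment to `a` from a parsed module with the
# RemoveAssignFromAST class defined above.
import ast

tree = ast.parse("a = 1\nb = 2\n")
tree = RemoveAssignFromAST('a').visit(tree)
print(ast.unparse(tree))  # "b = 2"  (ast.unparse requires Python >= 3.9)
```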
from typing import (
TYPE_CHECKING,
Iterable,
)
from docarray.array.memory import DocumentArrayInMemory
if TYPE_CHECKING:
from docarray.document import Document
class ChunkArray(DocumentArrayInMemory):
"""
:class:`ChunkArray` inherits from :class:`DocumentArray`.
It's a subset of Documents.
:param docs: Set of sub-documents (i.e chunks) of `reference_doc`
:param reference_doc: Reference :class:`Document` for the sub-documents
"""
def __init__(self, docs, reference_doc: 'Document'):
"""
Set constructor method.
:param doc_views: protobuf representation of the chunks
:param reference_doc: parent document
"""
self._ref_doc = reference_doc
super().__init__(docs)
if isinstance(docs, Iterable) and self._ref_doc is not None:
for d in docs:
d.parent_id = self._ref_doc.id
d.granularity = self._ref_doc.granularity + 1
def append(self, document: 'Document'):
"""Add a sub-document (i.e chunk) to the current Document.
:param document: Sub-document to be appended
.. note::
Compared to :attr:`DocumentArray.append()`, this method adds more safeguards to
make sure the added chunk is legit.
"""
document.parent_id = self._ref_doc.id
document.granularity = self._ref_doc.granularity + 1
super().append(document)
@property
def reference_doc(self) -> 'Document':
"""
Get the document that :class:`ChunkArray` belongs to.
:return: reference doc
"""
return self._ref_doc
@property
def granularity(self) -> int:
"""
Get granularity of all documents in this array.
:return: granularity
"""
return self._ref_doc.granularity + 1
@property
def adjacency(self) -> int:
"""
Get adjacency of all documents in this array.
:return: adjacency
"""
return self._ref_doc.adjacency
|
from typing import (
TYPE_CHECKING,
Iterable,
)
from .memory import DocumentArrayInMemory
if TYPE_CHECKING:
from ..document import Document
class ChunkArray(DocumentArrayInMemory):
"""
:class:`ChunkArray` inherits from :class:`DocumentArray`.
It's a subset of Documents.
:param docs: Set of sub-documents (i.e chunks) of `reference_doc`
:param reference_doc: Reference :class:`Document` for the sub-documents
"""
def __init__(self, docs, reference_doc: 'Document'):
"""
Set constructor method.
:param doc_views: protobuf representation of the chunks
:param reference_doc: parent document
"""
self._ref_doc = reference_doc
super().__init__(docs)
if isinstance(docs, Iterable) and self._ref_doc is not None:
for d in docs:
d.parent_id = self._ref_doc.id
d.granularity = self._ref_doc.granularity + 1
def append(self, document: 'Document'):
"""Add a sub-document (i.e chunk) to the current Document.
:param document: Sub-document to be appended
.. note::
Compared to :attr:`DocumentArray.append()`, this method adds more safeguards to
make sure the added chunk is legit.
"""
document.parent_id = self._ref_doc.id
document.granularity = self._ref_doc.granularity + 1
super().append(document)
@property
def reference_doc(self) -> 'Document':
"""
Get the document that :class:`ChunkArray` belongs to.
:return: reference doc
"""
return self._ref_doc
@property
def granularity(self) -> int:
"""
Get granularity of all documents in this array.
:return: granularity
"""
return self._ref_doc.granularity + 1
@property
def adjacency(self) -> int:
"""
Get adjacency of all documents in this array.
:return: adjacency
"""
return self._ref_doc.adjacency
|
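In the docarray v1 API this class belongs to, the propagation of `parent_id` and `granularity` happens transparently when a chunk is appended. A sketch, assuming docarray < 2 is installed:

```python
# Sketch (docarray v1 API): ChunkArray.append propagates parent_id and granularity.
from docarray import Document

parent = Document(text='root')
parent.chunks.append(Document(text='piece'))
print(parent.chunks[0].parent_id == parent.id)  # True
print(parent.chunks[0].granularity)             # 1
```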
from __future__ import annotations
import logging
from dataclasses import dataclass
from sentence_transformers.data_collator import SentenceTransformerDataCollator
logger = logging.getLogger(__name__)
@dataclass
class SparseEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a SparseEncoder model. Overridden from SentenceTransformerDataCollator with nothing added.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the two text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
|
from __future__ import annotations
import logging
from dataclasses import dataclass
from sentence_transformers.data_collator import SentenceTransformerDataCollator
logger = logging.getLogger(__name__)
@dataclass
class SparseEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a SparseEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the two text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import DDODHead
class TestDDODHead(TestCase):
def test_ddod_head_loss(self):
"""Tests ddod head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False))
atss_head = DDODHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
use_dcn=False,
norm_cfg=None,
train_cfg=cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_iou=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_iou'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_centerness_loss.item(), 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_iou'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_centerness_loss.item(), 0,
'centerness loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import DDODHead
class TestDDODHead(TestCase):
def test_ddod_head_loss(self):
"""Tests ddod head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False))
atss_head = DDODHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
use_dcn=False,
norm_cfg=None,
train_cfg=cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_iou=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_iou'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_centerness_loss.item(), 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_iou'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_centerness_loss.item(), 0,
'centerness loss should be non-zero')
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
name = _uppercase_uppercase_re.sub(r"\1_\2", name)
name = _lowercase_uppercase_re.sub(r"\1_\2", name)
return name.lower()
def snakecase_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
name = _single_underscore_re.split(name)
name = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
filepath = os.path.join(data_dir, prefix)
return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
prefix = filename_prefix_for_split(dataset_name, split)
prefix = os.path.join(path, prefix)
if shard_lengths:
num_shards = len(shard_lengths)
filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
if filetype_suffix:
filenames = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
filename = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
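# A minimal usage sketch (dataset name, split, and paths below are illustrative only):
#
#   camelcase_to_snakecase("SomeDatasetName")              -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name")            -> "SomeDatasetName"
#   filename_prefix_for_split("SomeDatasetName", "train")  -> "some_dataset_name-train"
#   filenames_for_dataset_split(
#       "/data", "SomeDatasetName", "train",
#       filetype_suffix="arrow", shard_lengths=[100, 200])
#   -> ["/data/some_dataset_name-train-00000-of-00002.arrow",
#       "/data/some_dataset_name-train-00001-of-00002.arrow"]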
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
name = _uppercase_uppercase_re.sub(r"\1_\2", name)
name = _lowercase_uppercase_re.sub(r"\1_\2", name)
return name.lower()
def snakecase_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
name = _single_underscore_re.split(name)
name = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
filepath = os.path.join(data_dir, prefix)
return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
prefix = filename_prefix_for_split(dataset_name, split)
prefix = os.path.join(path, prefix)
if shard_lengths:
num_shards = len(shard_lengths)
filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
if filetype_suffix:
filenames = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
filename = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
|
from __future__ import annotations
import pytest
from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler
from sentence_transformers.sampler import RoundRobinBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
DATASET_LENGTH = 25
@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ... , 23, 24, 100, 101, ..., 123, 124],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
values_1 = list(range(DATASET_LENGTH))
labels = [x % 2 for x in values_1]
dataset_1 = Dataset.from_dict({"data": values_1, "label": labels})
values_2 = [x + 100 for x in values_1] + [x + 200 for x in values_1]
dataset_2 = Dataset.from_dict({"data": values_2, "label": labels + labels})
return ConcatDataset([dataset_1, dataset_2])
def test_round_robin_batch_sampler(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[0]))), batch_size=batch_size, drop_last=True
)
batch_sampler_2 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[1]))), batch_size=batch_size, drop_last=True
)
sampler = RoundRobinBatchSampler(dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2])
batches = list(iter(sampler))
# Despite the second dataset being larger (2 * DATASET_LENGTH), we still only sample DATASET_LENGTH // batch_size batches from each dataset
# because the RoundRobinBatchSampler should stop sampling once it has sampled all elements from one dataset
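    # e.g. with DATASET_LENGTH = 25 and batch_size = 4, each sampler yields 25 // 4 = 6 batches
    # (drop_last=True), so round-robin sampling stops after 6 rounds, i.e. 12 batches in total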
assert len(batches) == 2 * DATASET_LENGTH // batch_size
assert len(sampler) == len(batches)
# Assert that batches are produced in a round-robin fashion
for i in range(0, len(batches), 2):
# Batch from the first part of the dataset
batch_1 = batches[i]
assert all(
dummy_concat_dataset[idx]["data"] < 100 for idx in batch_1
), f"Batch {i} contains data from the second part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_1]}"
# Batch from the second part of the dataset
batch_2 = batches[i + 1]
assert all(
dummy_concat_dataset[idx]["data"] >= 100 for idx in batch_2
), f"Batch {i+1} contains data from the first part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_2]}"
def test_round_robin_batch_sampler_value_error(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_2 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_3 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
with pytest.raises(
ValueError, match="The number of batch samplers must match the number of datasets in the ConcatDataset"
):
RoundRobinBatchSampler(
dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2, batch_sampler_3]
)
|
from __future__ import annotations
import pytest
from datasets import Dataset
from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler
from sentence_transformers.sampler import RoundRobinBatchSampler
DATASET_LENGTH = 25
@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ... , 23, 24, 100, 101, ..., 123, 124],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
values_1 = list(range(DATASET_LENGTH))
labels = [x % 2 for x in values_1]
dataset_1 = Dataset.from_dict({"data": values_1, "label": labels})
values_2 = [x + 100 for x in values_1] + [x + 200 for x in values_1]
dataset_2 = Dataset.from_dict({"data": values_2, "label": labels + labels})
return ConcatDataset([dataset_1, dataset_2])
def test_round_robin_batch_sampler(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[0]))), batch_size=batch_size, drop_last=True
)
batch_sampler_2 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[1]))), batch_size=batch_size, drop_last=True
)
sampler = RoundRobinBatchSampler(dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2])
batches = list(iter(sampler))
# Despite the second dataset being larger (2 * DATASET_LENGTH), we still only sample DATASET_LENGTH // batch_size batches from each dataset
# because the RoundRobinBatchSampler should stop sampling once it has sampled all elements from one dataset
assert len(batches) == 2 * DATASET_LENGTH // batch_size
assert len(sampler) == len(batches)
# Assert that batches are produced in a round-robin fashion
for i in range(0, len(batches), 2):
# Batch from the first part of the dataset
batch_1 = batches[i]
assert all(
dummy_concat_dataset[idx]["data"] < 100 for idx in batch_1
), f"Batch {i} contains data from the second part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_1]}"
# Batch from the second part of the dataset
batch_2 = batches[i + 1]
assert all(
dummy_concat_dataset[idx]["data"] >= 100 for idx in batch_2
), f"Batch {i+1} contains data from the first part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_2]}"
def test_round_robin_batch_sampler_value_error(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_2 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_3 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
with pytest.raises(
ValueError, match="The number of batch samplers must match the number of datasets in the ConcatDataset"
):
RoundRobinBatchSampler(
dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2, batch_sampler_3]
)
|
"""
This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'.
Note: we only ping the person who pulled the PR, not the reviewers, as the reviewers can sometimes be external
to torchaudio with no labeling responsibility, so we don't want to bother them.
"""
import json
import os
import sys
from typing import Any, Optional, Set
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# For a PR with primary label "other", it does not require an additional secondary label
PRIMARY_LABELS = {
"BC-breaking",
"deprecation",
"bug fix",
"new feature",
"improvement",
"prototype",
"other",
}
SECONDARY_LABELS = {
"module: io",
"module: ops",
"module: models",
"module: pipelines",
"module: datasets",
"module: docs",
"module: tests",
"tutorial",
"recipe",
"example",
"build",
"style",
"perf",
"other",
}
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
REQUEST_HEADERS = {"Accept": "application/vnd.github.v3+json", "Authorization": f"token {GITHUB_TOKEN}"}
TORCHAUDIO_REPO = "https://api.github.com/repos/pytorch/audio"
def query_torchaudio(cmd: str) -> Any:
response = requests.get(f"{TORCHAUDIO_REPO}/{cmd}", headers=REQUEST_HEADERS)
return response.json()
def get_pr_merger_and_number(commit_hash: str) -> Optional[str]:
data = query_torchaudio(f"commits/{commit_hash}")
commit_message = data["commit"]["message"]
pulled_by = commit_message.split("Pulled By: ")
pulled_by = pulled_by[1].split("\n")[0] if len(pulled_by) > 1 else None
pr_number = commit_message.split("Pull Request resolved: https://github.com/pytorch/audio/pull/")
pr_number = pr_number[1].split("\n")[0] if len(pr_number) > 1 else None
return pulled_by, pr_number
def get_labels(pr_number: int) -> Set[str]:
data = query_torchaudio(f"pulls/{pr_number}")
labels = {label["name"] for label in data["labels"]}
return labels
def post_github_comment(pr_number: int, merger: str) -> Any:
message = {
"body": f"Hey @{merger}."
+ """
You merged this PR, but labels were not properly added. Please add a primary and secondary label \
(See https://github.com/pytorch/audio/blob/main/.github/process_commit.py).
---
## Some guidance:
Use 'module: ops' for operations under 'torchaudio/{transforms, functional}', \
and ML-related components under 'torchaudio/csrc' (e.g. RNN-T loss).
Things in "examples" directory:
- 'recipe' is applicable to training recipes under the 'examples' folder,
- 'tutorial' is applicable to tutorials under the “examples/tutorials” folder
- 'example' is applicable to everything else (e.g. C++ examples)
- 'module: docs' is applicable to code documentations (not to tutorials).
Regarding examples in code documentations, please also use 'module: docs'.
Please use 'other' tag only when you’re sure the changes are not much relevant to users, \
or when all other tags are not applicable. Try not to use it often, in order to minimize \
efforts required when we prepare release notes.
---
When preparing release notes, please make sure 'documentation' and 'tutorials' occur as the \
last sub-categories under each primary category like 'new feature', 'improvements' or 'prototype'.
Things related to build are by default excluded from the release note, \
except when it impacts users. For example:
* Drop support of Python 3.7.
* Add support of Python 3.X.
* Change the way a third party library is bound (so that user needs to install it separately).
"""
}
response = requests.post(
f"{TORCHAUDIO_REPO}/issues/{pr_number}/comments", json.dumps(message), headers=REQUEST_HEADERS
)
return response.json()
if __name__ == "__main__":
commit_hash = sys.argv[1]
merger, pr_number = get_pr_merger_and_number(commit_hash)
if pr_number:
labels = get_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
post_github_comment(pr_number, merger)
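# A rough sketch of the commit-message trailer this script parses (values are made up):
#
#   Add new decoder (#2345)
#
#   Pull Request resolved: https://github.com/pytorch/audio/pull/2345
#   Pulled By: some-maintainer
#
# For such a message, get_pr_merger_and_number returns ("some-maintainer", "2345");
# either field falls back to None when its marker line is absent.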
|
"""
This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'.
Note: we only ping the person who pulled the PR, not the reviewers, as the reviewers can sometimes be external
to torchaudio with no labeling responsibility, so we don't want to bother them.
"""
import json
import os
import sys
from typing import Any, Optional, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# For a PR with primary label "other", it does not require an additional secondary label
PRIMARY_LABELS = {
"BC-breaking",
"deprecation",
"bug fix",
"new feature",
"improvement",
"prototype",
"other",
}
SECONDARY_LABELS = {
"module: io",
"module: ops",
"module: models",
"module: pipelines",
"module: datasets",
"module: docs",
"module: tests",
"tutorial",
"recipe",
"example",
"build",
"style",
"perf",
"other",
}
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
REQUEST_HEADERS = {"Accept": "application/vnd.github.v3+json", "Authorization": f"token {GITHUB_TOKEN}"}
TORCHAUDIO_REPO = "https://api.github.com/repos/pytorch/audio"
def query_torchaudio(cmd: str) -> Any:
response = requests.get(f"{TORCHAUDIO_REPO}/{cmd}", headers=REQUEST_HEADERS)
return response.json()
def get_pr_merger_and_number(commit_hash: str) -> Tuple[Optional[str], Optional[str]]:
data = query_torchaudio(f"commits/{commit_hash}")
commit_message = data["commit"]["message"]
pulled_by = commit_message.split("Pulled By: ")
pulled_by = pulled_by[1].split("\n")[0] if len(pulled_by) > 1 else None
pr_number = commit_message.split("Pull Request resolved: https://github.com/pytorch/audio/pull/")
pr_number = pr_number[1].split("\n")[0] if len(pr_number) > 1 else None
return pulled_by, pr_number
def get_labels(pr_number: int) -> Set[str]:
data = query_torchaudio(f"pulls/{pr_number}")
labels = {label["name"] for label in data["labels"]}
return labels
def post_github_comment(pr_number: int, merger: str) -> Any:
message = {
"body": f"Hey @{merger}."
+ """
You merged this PR, but labels were not properly added. Please add a primary and secondary label \
(See https://github.com/pytorch/audio/blob/main/.github/process_commit.py).
---
## Some guidance:
Use 'module: ops' for operations under 'torchaudio/{transforms, functional}', \
and ML-related components under 'torchaudio/csrc' (e.g. RNN-T loss).
Things in "examples" directory:
- 'recipe' is applicable to training recipes under the 'examples' folder,
- 'tutorial' is applicable to tutorials under the “examples/tutorials” folder
- 'example' is applicable to everything else (e.g. C++ examples)
- 'module: docs' is applicable to code documentations (not to tutorials). \
Regarding examples in code documentations, please also use 'module: docs'.
Please use 'other' tag only when you’re sure the changes are not much relevant to users, \
or when all other tags are not applicable. Try not to use it often, in order to minimize \
efforts required when we prepare release notes.
---
When preparing release notes, please make sure 'documentation' and 'tutorials' occur as the \
last sub-categories under each primary category like 'new feature', 'improvements' or 'prototype'.
Things related to build are by default excluded from the release note, \
except when it impacts users. For example:
* Drop support of Python 3.7.
* Add support of Python 3.X.
* Change the way a third party library is bound (so that user needs to install it separately).
"""
}
response = requests.post(
f"{TORCHAUDIO_REPO}/issues/{pr_number}/comments", json.dumps(message), headers=REQUEST_HEADERS
)
return response.json()
if __name__ == "__main__":
commit_hash = sys.argv[1]
merger, pr_number = get_pr_merger_and_number(commit_hash)
if pr_number:
labels = get_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
post_github_comment(pr_number, merger)
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
description: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
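# A small illustrative sketch (the values are arbitrary and not taken from the store API):
#
#   Pagination(total_items=42, total_pages=2, current_page=1, page_size=25)
#   StoreReviewCreate(store_listing_version_id="some-version-id", score=5, comments="Great agent")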
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
|
import contextlib
from collections.abc import Iterable
from pathlib import Path
from typing import Any
from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
def _get_dep_inline_table(path: Path) -> InlineTable:
dep = inline_table()
dep.update({"path": str(path), "develop": True})
return dep
def add_dependencies_to_pyproject_toml(
pyproject_toml: Path,
local_editable_dependencies: Iterable[tuple[str, Path]],
) -> None:
"""Add dependencies to pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
# tomlkit types aren't amazing - treat as Dict instead
pyproject: dict[str, Any] = load(f)
pyproject["tool"]["poetry"]["dependencies"].update(
{
name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
for name, loc in local_editable_dependencies
},
)
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
def remove_dependencies_from_pyproject_toml(
pyproject_toml: Path,
local_editable_dependencies: Iterable[str],
) -> None:
"""Remove dependencies from pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
pyproject: dict[str, Any] = load(f)
# tomlkit types aren't amazing - treat as Dict instead
dependencies = pyproject["tool"]["poetry"]["dependencies"]
for name in local_editable_dependencies:
with contextlib.suppress(KeyError):
del dependencies[name]
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
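# A minimal usage sketch (package names and paths are hypothetical):
#
#   add_dependencies_to_pyproject_toml(
#       Path("myproject/pyproject.toml"),
#       [("foo-utils", Path("myproject/packages/foo_utils"))],
#   )
#
# rewrites the dependency as an inline table relative to the pyproject's directory, e.g.
#   foo-utils = {path = "packages/foo_utils", develop = true}
# and remove_dependencies_from_pyproject_toml deletes such entries again, silently skipping
# names that are not present.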
|
import contextlib
from collections.abc import Iterable
from pathlib import Path
from typing import Any
from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
def _get_dep_inline_table(path: Path) -> InlineTable:
dep = inline_table()
dep.update({"path": str(path), "develop": True})
return dep
def add_dependencies_to_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[tuple[str, Path]]
) -> None:
"""Add dependencies to pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
# tomlkit types aren't amazing - treat as Dict instead
pyproject: dict[str, Any] = load(f)
pyproject["tool"]["poetry"]["dependencies"].update(
{
name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
for name, loc in local_editable_dependencies
}
)
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
def remove_dependencies_from_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[str]
) -> None:
"""Remove dependencies from pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
pyproject: dict[str, Any] = load(f)
# tomlkit types aren't amazing - treat as Dict instead
dependencies = pyproject["tool"]["poetry"]["dependencies"]
for name in local_editable_dependencies:
with contextlib.suppress(KeyError):
del dependencies[name]
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
|
from abc import ABC
from typing import Any, Callable, Dict, List, Optional, Union, TypeVar
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.workflow import (
Context,
)
from llama_index.core.workflow.checkpointer import CheckpointCallback
from llama_index.core.workflow.handler import WorkflowHandler
T = TypeVar("T", bound="BaseWorkflowAgent") # type: ignore[name-defined]
class SingleAgentRunnerMixin(ABC):
"""
Mixin class for executing a single agent within a workflow system.
This class provides the necessary interface for running a single agent.
"""
def _get_steps(self) -> Dict[str, Callable]:
"""Returns all the steps from the prebuilt workflow."""
from llama_index.core.agent.workflow import AgentWorkflow
instance = AgentWorkflow(agents=[self]) # type: ignore
return instance._get_steps()
def run(
self,
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
ctx: Optional[Context] = None,
stepwise: bool = False,
checkpoint_callback: Optional[CheckpointCallback] = None,
**workflow_kwargs: Any,
) -> WorkflowHandler:
"""Run the agent."""
from llama_index.core.agent.workflow import AgentWorkflow
workflow = AgentWorkflow(agents=[self], **workflow_kwargs) # type: ignore[list-item]
return workflow.run(
user_msg=user_msg,
chat_history=chat_history,
memory=memory,
ctx=ctx,
stepwise=stepwise,
checkpoint_callback=checkpoint_callback,
)
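# A rough usage sketch; FunctionAgent is assumed here to be one of the concrete agent classes
# that mix this in, and the tool/LLM setup is omitted:
#
#   agent = FunctionAgent(tools=[...], llm=llm)
#   handler = agent.run(user_msg="What's the weather like?")
#   response = await handler  # WorkflowHandler is awaitable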
|
from abc import ABC
from typing import Any, Callable, Dict, List, Optional, Union, TypeVar
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.workflow import (
Context,
)
from llama_index.core.workflow.checkpointer import CheckpointCallback
from llama_index.core.workflow.handler import WorkflowHandler
T = TypeVar("T", bound="BaseWorkflowAgent") # type: ignore[name-defined]
class SingleAgentRunnerMixin(ABC):
"""Mixin class for executing a single agent within a workflow system.
This class provides the necessary interface for running a single agent.
"""
def _get_steps(self) -> Dict[str, Callable]:
"""Returns all the steps from the prebuilt workflow."""
from llama_index.core.agent.workflow import AgentWorkflow
instance = AgentWorkflow(agents=[self]) # type: ignore
return instance._get_steps()
def run(
self: T,
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
ctx: Optional[Context] = None,
stepwise: bool = False,
checkpoint_callback: Optional[CheckpointCallback] = None,
**workflow_kwargs: Any,
) -> WorkflowHandler:
"""Run the agent."""
from llama_index.core.agent.workflow import AgentWorkflow
workflow = AgentWorkflow(agents=[self], **workflow_kwargs)
return workflow.run(
user_msg=user_msg,
chat_history=chat_history,
memory=memory,
ctx=ctx,
stepwise=stepwise,
checkpoint_callback=checkpoint_callback,
)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_hunyuan_skyreels_image2video"] = ["HunyuanSkyreelsImageToVideoPipeline"]
_import_structure["pipeline_hunyuan_video"] = ["HunyuanVideoPipeline"]
_import_structure["pipeline_hunyuan_video_framepack"] = ["HunyuanVideoFramepackPipeline"]
_import_structure["pipeline_hunyuan_video_image2video"] = ["HunyuanVideoImageToVideoPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_hunyuan_skyreels_image2video import HunyuanSkyreelsImageToVideoPipeline
from .pipeline_hunyuan_video import HunyuanVideoPipeline
from .pipeline_hunyuan_video_framepack import HunyuanVideoFramepackPipeline
from .pipeline_hunyuan_video_image2video import HunyuanVideoImageToVideoPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
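# Rough intent of the boilerplate above (shared across diffusers pipeline packages): an import
# such as
#   from diffusers.pipelines.hunyuan_video import HunyuanVideoPipeline
# only materialises the heavy pipeline module on first attribute access via _LazyModule, while
# the dummy objects registered above raise an informative error when torch or transformers
# are unavailable.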
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_hunyuan_skyreels_image2video"] = ["HunyuanSkyreelsImageToVideoPipeline"]
_import_structure["pipeline_hunyuan_video"] = ["HunyuanVideoPipeline"]
_import_structure["pipeline_hunyuan_video_image2video"] = ["HunyuanVideoImageToVideoPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_hunyuan_skyreels_image2video import HunyuanSkyreelsImageToVideoPipeline
from .pipeline_hunyuan_video import HunyuanVideoPipeline
from .pipeline_hunyuan_video_image2video import HunyuanVideoImageToVideoPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'classes':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}})))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False,
backend_args={{_base_.backend_args}})
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means whether to enable automatic LR scaling by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'classes':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means whether to enable automatic LR scaling by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True)
# model settings
model = dict(
type='CornerNet',
data_preprocessor=data_preprocessor,
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
# The cropped images are padded into squares during training,
# but may be smaller than crop_size.
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='Resize', scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
# TODO: mstest is not currently implemented
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
train_dataloader = dict(
batch_size=6,
num_workers=3,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=0.0005),
clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 210
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[190],
gamma=0.1)
]
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True)
# model settings
model = dict(
type='CornerNet',
data_preprocessor=data_preprocessor,
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
# The cropped images are padded into squares during training,
# but may be smaller than crop_size.
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='Resize', scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
# TODO: mstest is not currently implemented
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
train_dataloader = dict(
batch_size=6,
num_workers=3,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=0.0005),
clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 210
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[190],
gamma=0.1)
]
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
"""Tool for the SceneXplain API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.scenexplain import SceneXplainAPIWrapper
class SceneXplainInput(BaseModel):
"""Input for SceneXplain."""
query: str = Field(..., description="The link to the image to explain")
class SceneXplainTool(BaseTool):
"""Tool that explains images."""
name: str = "image_explainer"
description: str = (
"An Image Captioning Tool: Use this tool to generate a detailed caption "
"for an image. The input can be an image file of any format, and "
"the output will be a text description that covers every detail of the image."
)
api_wrapper: SceneXplainAPIWrapper = Field(default_factory=SceneXplainAPIWrapper)
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
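# A minimal usage sketch (assumes SceneXplain credentials are configured for
# SceneXplainAPIWrapper, e.g. via the SCENEX_API_KEY environment variable):
#
#   tool = SceneXplainTool()
#   caption = tool.run("https://example.com/some-image.png")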
|
"""Tool for the SceneXplain API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.scenexplain import SceneXplainAPIWrapper
class SceneXplainInput(BaseModel):
"""Input for SceneXplain."""
query: str = Field(..., description="The link to the image to explain")
class SceneXplainTool(BaseTool): # type: ignore[override]
"""Tool that explains images."""
name: str = "image_explainer"
description: str = (
"An Image Captioning Tool: Use this tool to generate a detailed caption "
"for an image. The input can be an image file of any format, and "
"the output will be a text description that covers every detail of the image."
)
api_wrapper: SceneXplainAPIWrapper = Field(default_factory=SceneXplainAPIWrapper) # type: ignore[arg-type]
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
from .proto import ProtoArrayMixin
|
from abc import ABC
from docarray.array.mixins.content import ContentPropertyMixin
from docarray.array.mixins.delitem import DelItemMixin
from docarray.array.mixins.embed import EmbedMixin
from docarray.array.mixins.empty import EmptyMixin
from docarray.array.mixins.evaluation import EvaluationMixin
from docarray.array.mixins.find import FindMixin
from docarray.array.mixins.getattr import GetAttributeMixin
from docarray.array.mixins.getitem import GetItemMixin
from docarray.array.mixins.group import GroupMixin
from docarray.array.mixins.io.binary import BinaryIOMixin
from docarray.array.mixins.io.common import CommonIOMixin
from docarray.array.mixins.io.csv import CsvIOMixin
from docarray.array.mixins.io.dataframe import DataframeIOMixin
from docarray.array.mixins.io.from_gen import FromGeneratorMixin
from docarray.array.mixins.io.json import JsonIOMixin
from docarray.array.mixins.io.pushpull import PushPullMixin
from docarray.array.mixins.match import MatchMixin
from docarray.array.mixins.parallel import ParallelMixin
from docarray.array.mixins.plot import PlotMixin
from docarray.array.mixins.post import PostMixin
from docarray.array.mixins.pydantic import PydanticMixin
from docarray.array.mixins.reduce import ReduceMixin
from docarray.array.mixins.sample import SampleMixin
from docarray.array.mixins.setitem import SetItemMixin
from docarray.array.mixins.strawberry import StrawberryMixin
from docarray.array.mixins.text import TextToolsMixin
from docarray.array.mixins.traverse import TraverseMixin
from docarray.array.mixins.dataloader import DataLoaderMixin
class AllMixins(
GetAttributeMixin,
GetItemMixin,
SetItemMixin,
DelItemMixin,
ContentPropertyMixin,
PydanticMixin,
StrawberryMixin,
GroupMixin,
EmptyMixin,
CsvIOMixin,
JsonIOMixin,
BinaryIOMixin,
CommonIOMixin,
EmbedMixin,
PushPullMixin,
FromGeneratorMixin,
FindMixin,
MatchMixin,
TraverseMixin,
PlotMixin,
SampleMixin,
PostMixin,
TextToolsMixin,
EvaluationMixin,
ReduceMixin,
ParallelMixin,
DataframeIOMixin,
DataLoaderMixin,
ABC,
):
"""All plugins that can be used in :class:`DocumentArray`."""
...
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
            If None, L2 normalization is not applied to the first input feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
scale (float, optional): Defaults to 20..
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
        # cast to FP32 so the normalization stays numerically stable in FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
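# A minimal construction sketch (the channel/stride settings below mirror a typical
# SSD300-style configuration and are illustrative only):
#
#   neck = SSDNeck(
#       in_channels=(512, 1024),
#       out_channels=(512, 1024, 512, 256, 256, 256),
#       level_strides=(2, 2, 1, 1),
#       level_paddings=(1, 1, 0, 0))
#   feats = (torch.rand(1, 512, 38, 38), torch.rand(1, 1024, 19, 19))
#   outs = neck(feats)  # tuple of 6 feature maps, the extra 4 coming from self.extra_layers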
|
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
            If None, L2 normalization is not applied to the first input feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
scale (float, optional): Defaults to 20..
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
        # cast to FP32 so the normalization stays numerically stable in FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.audio_url import AudioUrl
from docarray.typing.url.image_url import ImageUrl
from docarray.typing.url.text_url import TextUrl
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
from docarray.typing.url.video_url import VideoUrl
__all__ = [
'ImageUrl',
'AudioUrl',
'AnyUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
]
|
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.audio_url import AudioUrl
from docarray.typing.url.image_url import ImageUrl
from docarray.typing.url.text_url import TextUrl
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
from docarray.typing.url.video_url import VideoUrl
__all__ = [
'ImageUrl',
'AudioUrl',
'AnyUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
]
|
import os
import pytest
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.notion import NotionToolSpec
# Get yourself a page id and database id from your notion account
# Refer to the page: https://developers.notion.com/docs/create-a-notion-integration#give-your-integration-page-permissions
page_ids = ["17d66c19670f80c5aaddfb8a0a449179"] # replace with your page id
database_ids = ["16066c19-670f-801d-adb8-fa9d1cdaa053"] # replace with your database id
def test_class():
names_of_base_classes = [b.__name__ for b in NotionToolSpec.__mro__]
assert BaseToolSpec.__name__ in names_of_base_classes
@pytest.mark.skipif(
"NOTION_INTEGRATION_TOKEN" not in os.environ,
reason="NOTION_INTEGRATION_TOKEN is not set",
)
def test_load_data_with_page_ids():
tool = NotionToolSpec()
content = tool.load_data(page_ids=page_ids)
assert content
@pytest.mark.skipif(
"NOTION_INTEGRATION_TOKEN" not in os.environ,
reason="NOTION_INTEGRATION_TOKEN is not set",
)
def test_load_data_with_database_ids():
tool = NotionToolSpec()
content = tool.load_data(database_ids=database_ids)
assert content
@pytest.mark.skipif(
"NOTION_INTEGRATION_TOKEN" not in os.environ,
reason="NOTION_INTEGRATION_TOKEN is not set",
)
def test_load_data_with_page_ids_and_database_ids():
tool = NotionToolSpec()
content = tool.load_data(page_ids=page_ids, database_ids=database_ids)
assert content
@pytest.mark.skipif(
"NOTION_INTEGRATION_TOKEN" not in os.environ,
reason="NOTION_INTEGRATION_TOKEN is not set",
)
def test_search_data():
tool = NotionToolSpec()
result = tool.search_data(query="Website") # replace with your search query
assert len(result) > 0
|
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.notion import NotionToolSpec
def test_class():
names_of_base_classes = [b.__name__ for b in NotionToolSpec.__mro__]
assert BaseToolSpec.__name__ in names_of_base_classes
|
"""Google Search tool spec."""
import json
import urllib.parse
from typing import Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
QUERY_URL_TMPL = (
"https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}"
)
class GoogleSearchToolSpec(BaseToolSpec):
"""Google Search tool spec."""
spec_functions = ["google_search"]
def __init__(self, key: str, engine: str, num: Optional[int] = None) -> None:
"""Initialize with parameters."""
self.key = key
self.engine = engine
self.num = num
def google_search(self, query: str):
"""
Make a query to the Google search engine to receive a list of results.
Args:
query (str): The query to be passed to Google search.
            num (int, optional): The number of search results to return, taken
                from the value passed to the constructor. Defaults to None.
Raises:
ValueError: If the 'num' is not an integer between 1 and 10.
"""
url = QUERY_URL_TMPL.format(
key=self.key, engine=self.engine, query=urllib.parse.quote_plus(query)
)
if self.num is not None:
if not 1 <= self.num <= 10:
raise ValueError("num should be an integer between 1 and 10, inclusive")
url += f"&num={self.num}"
response = requests.get(url)
results = json.loads(response.text).get("items", [])
documents = []
if len(results) == 0:
return "No search results available"
for result in results:
if "snippet" in result:
documents.append(
Document(
text=result["snippet"],
metadata={"title": result["title"], "link": result["link"]},
)
)
return documents
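# Hedged usage sketch (illustration only, not part of the original module).
# "YOUR_API_KEY" and "YOUR_ENGINE_ID" are placeholders for real Programmable
# Search Engine credentials; running this issues a live HTTP request, and with
# invalid credentials the method simply reports that no results are available.
if __name__ == "__main__":
    spec = GoogleSearchToolSpec(key="YOUR_API_KEY", engine="YOUR_ENGINE_ID", num=5)
    results = spec.google_search("retrieval augmented generation")
    if isinstance(results, str):
        print(results)  # "No search results available"
    else:
        for doc in results:
            print(doc.metadata["title"], doc.metadata["link"])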
|
"""Google Search tool spec."""
import urllib.parse
from typing import Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
QUERY_URL_TMPL = (
"https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}"
)
class GoogleSearchToolSpec(BaseToolSpec):
"""Google Search tool spec."""
spec_functions = ["google_search"]
def __init__(self, key: str, engine: str, num: Optional[int] = None) -> None:
"""Initialize with parameters."""
self.key = key
self.engine = engine
self.num = num
def google_search(self, query: str):
"""
Make a query to the Google search engine to receive a list of results.
Args:
query (str): The query to be passed to Google search.
            num (int, optional): The number of search results to return, taken
                from the value passed to the constructor. Defaults to None.
Raises:
ValueError: If the 'num' is not an integer between 1 and 10.
"""
url = QUERY_URL_TMPL.format(
key=self.key, engine=self.engine, query=urllib.parse.quote_plus(query)
)
if self.num is not None:
if not 1 <= self.num <= 10:
raise ValueError("num should be an integer between 1 and 10, inclusive")
url += f"&num={self.num}"
response = requests.get(url)
return [Document(text=response.text)]
|
from typing import Any, Dict, Optional, Type, cast
from llama_index.core.llms.llm import LLM
from llama_index.core.output_parsers.pydantic import PydanticOutputParser
from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.core.settings import Settings
from llama_index.core.types import BaseOutputParser, BasePydanticProgram, Model
class LLMTextCompletionProgram(BasePydanticProgram[Model]):
"""
LLM Text Completion Program.
Uses generic LLM text completion + an output parser to generate a structured output.
"""
def __init__(
self,
output_parser: BaseOutputParser,
output_cls: Type[Model],
prompt: BasePromptTemplate,
llm: LLM,
verbose: bool = False,
) -> None:
self._output_parser = output_parser
self._output_cls = output_cls
self._llm = llm
self._prompt = prompt
self._verbose = verbose
self._prompt.output_parser = output_parser
@classmethod
def from_defaults(
cls,
output_parser: Optional[BaseOutputParser] = None,
output_cls: Optional[Type[Model]] = None,
prompt_template_str: Optional[str] = None,
prompt: Optional[BasePromptTemplate] = None,
llm: Optional[LLM] = None,
verbose: bool = False,
**kwargs: Any,
) -> "LLMTextCompletionProgram[Model]":
llm = llm or Settings.llm
if prompt is None and prompt_template_str is None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt is not None and prompt_template_str is not None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt_template_str is not None:
prompt = PromptTemplate(prompt_template_str)
# decide default output class if not set
if output_cls is None:
if not isinstance(output_parser, PydanticOutputParser):
raise ValueError("Output parser must be PydanticOutputParser.")
output_cls = output_parser.output_cls
else:
if output_parser is None:
output_parser = PydanticOutputParser(output_cls=output_cls)
return cls(
output_parser,
output_cls,
prompt=cast(PromptTemplate, prompt),
llm=llm,
verbose=verbose,
)
@property
def output_cls(self) -> Type[Model]:
return self._output_cls
@property
def prompt(self) -> BasePromptTemplate:
return self._prompt
@prompt.setter
def prompt(self, prompt: BasePromptTemplate) -> None:
self._prompt = prompt
def __call__(
self,
llm_kwargs: Optional[Dict[str, Any]] = None,
*args: Any,
**kwargs: Any,
) -> Model:
llm_kwargs = llm_kwargs or {}
if self._llm.metadata.is_chat_model:
messages = self._prompt.format_messages(llm=self._llm, **kwargs)
messages = self._llm._extend_messages(messages)
chat_response = self._llm.chat(messages, **llm_kwargs)
raw_output = chat_response.message.content or ""
else:
formatted_prompt = self._prompt.format(llm=self._llm, **kwargs)
response = self._llm.complete(formatted_prompt, **llm_kwargs)
raw_output = response.text
output = self._output_parser.parse(raw_output)
if not isinstance(output, self._output_cls):
raise ValueError(
f"Output parser returned {type(output)} but expected {self._output_cls}"
)
return output
async def acall(
self,
llm_kwargs: Optional[Dict[str, Any]] = None,
*args: Any,
**kwargs: Any,
) -> Model:
llm_kwargs = llm_kwargs or {}
if self._llm.metadata.is_chat_model:
messages = self._prompt.format_messages(llm=self._llm, **kwargs)
messages = self._llm._extend_messages(messages)
chat_response = await self._llm.achat(messages, **llm_kwargs)
raw_output = chat_response.message.content or ""
else:
formatted_prompt = self._prompt.format(llm=self._llm, **kwargs)
response = await self._llm.acomplete(formatted_prompt, **llm_kwargs)
raw_output = response.text
output = self._output_parser.parse(raw_output)
if not isinstance(output, self._output_cls):
raise ValueError(
f"Output parser returned {type(output)} but expected {self._output_cls}"
)
return output
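# Hedged usage sketch (illustration only, not part of the original module).
# It assumes an LLM is already configured on Settings.llm (or passed via
# `llm=`) with valid credentials; the `Album` model and the prompt string are
# made-up examples.
if __name__ == "__main__":
    from pydantic import BaseModel

    class Album(BaseModel):
        title: str
        artist: str

    program = LLMTextCompletionProgram.from_defaults(
        output_cls=Album,
        prompt_template_str="Generate an album inspired by {topic}.",
    )
    album = program(topic="the deep sea")  # parsed into an Album instance
    print(album)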
|
from typing import Any, Dict, Optional, Type, cast
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms.llm import LLM
from llama_index.core.output_parsers.pydantic import PydanticOutputParser
from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.core.settings import Settings
from llama_index.core.types import BaseOutputParser, BasePydanticProgram
class LLMTextCompletionProgram(BasePydanticProgram[BaseModel]):
"""
LLM Text Completion Program.
Uses generic LLM text completion + an output parser to generate a structured output.
"""
def __init__(
self,
output_parser: BaseOutputParser,
output_cls: Type[BaseModel],
prompt: BasePromptTemplate,
llm: LLM,
verbose: bool = False,
) -> None:
self._output_parser = output_parser
self._output_cls = output_cls
self._llm = llm
self._prompt = prompt
self._verbose = verbose
self._prompt.output_parser = output_parser
@classmethod
def from_defaults(
cls,
output_parser: Optional[BaseOutputParser] = None,
output_cls: Optional[Type[BaseModel]] = None,
prompt_template_str: Optional[str] = None,
prompt: Optional[BasePromptTemplate] = None,
llm: Optional[LLM] = None,
verbose: bool = False,
**kwargs: Any,
) -> "LLMTextCompletionProgram":
llm = llm or Settings.llm
if prompt is None and prompt_template_str is None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt is not None and prompt_template_str is not None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt_template_str is not None:
prompt = PromptTemplate(prompt_template_str)
# decide default output class if not set
if output_cls is None:
if not isinstance(output_parser, PydanticOutputParser):
raise ValueError("Output parser must be PydanticOutputParser.")
output_cls = output_parser.output_cls
else:
if output_parser is None:
output_parser = PydanticOutputParser(output_cls=output_cls)
return cls(
output_parser,
output_cls,
prompt=cast(PromptTemplate, prompt),
llm=llm,
verbose=verbose,
)
@property
def output_cls(self) -> Type[BaseModel]:
return self._output_cls
@property
def prompt(self) -> BasePromptTemplate:
return self._prompt
@prompt.setter
def prompt(self, prompt: BasePromptTemplate) -> None:
self._prompt = prompt
def __call__(
self,
llm_kwargs: Optional[Dict[str, Any]] = None,
*args: Any,
**kwargs: Any,
) -> BaseModel:
llm_kwargs = llm_kwargs or {}
if self._llm.metadata.is_chat_model:
messages = self._prompt.format_messages(llm=self._llm, **kwargs)
messages = self._llm._extend_messages(messages)
chat_response = self._llm.chat(messages, **llm_kwargs)
raw_output = chat_response.message.content or ""
else:
formatted_prompt = self._prompt.format(llm=self._llm, **kwargs)
response = self._llm.complete(formatted_prompt, **llm_kwargs)
raw_output = response.text
output = self._output_parser.parse(raw_output)
if not isinstance(output, self._output_cls):
raise ValueError(
f"Output parser returned {type(output)} but expected {self._output_cls}"
)
return output
async def acall(
self,
llm_kwargs: Optional[Dict[str, Any]] = None,
*args: Any,
**kwargs: Any,
) -> BaseModel:
llm_kwargs = llm_kwargs or {}
if self._llm.metadata.is_chat_model:
messages = self._prompt.format_messages(llm=self._llm, **kwargs)
messages = self._llm._extend_messages(messages)
chat_response = await self._llm.achat(messages, **llm_kwargs)
raw_output = chat_response.message.content or ""
else:
formatted_prompt = self._prompt.format(llm=self._llm, **kwargs)
response = await self._llm.acomplete(formatted_prompt, **llm_kwargs)
raw_output = response.text
output = self._output_parser.parse(raw_output)
if not isinstance(output, self._output_cls):
raise ValueError(
f"Output parser returned {type(output)} but expected {self._output_cls}"
)
return output
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
See this script for more details on how to use the new training API:
https://github.com/UKPLab/sentence-transformers/blob/master/examples/unsupervised_learning/TSDAE/train_stsb_tsdae.py
"""
from __future__ import annotations
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: list[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
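# Hedged usage sketch (illustration only, not part of the original module).
# It assumes nltk and its "punkt" tokenizer data are installed, since the
# default deletion noise relies on nltk.word_tokenize.
if __name__ == "__main__":
    sentences = [
        "Denoising autoencoders reconstruct the original sentence from a corrupted one.",
        "TSDAE uses deletion noise by default.",
    ]
    dataset = DenoisingAutoEncoderDataset(sentences)
    example = dataset[0]
    print(example.texts)  # [noisy sentence, original sentence]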
|
from __future__ import annotations
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: list[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
|
__version__ = '0.12.4'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.12.3'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data):
info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token)
assert list(info.splits.keys()) == ["train"]
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("squad", ["plain_text"]),
("acronym_identification", ["default"]),
("lhoestq/squad", ["plain_text"]),
("lhoestq/test", ["default"]),
("lhoestq/demo1", ["default"]),
("dalle-mini/wit", ["default"]),
("datasets-maintainers/audiofolder_no_configs_in_metadata", ["default"]),
("datasets-maintainers/audiofolder_single_config_in_metadata", ["custom"]),
("datasets-maintainers/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert config_names == expected
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["default"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("squad", ["plain_text"]),
("acronym_identification", ["default"]),
("lhoestq/squad", ["plain_text"]),
("lhoestq/test", ["default"]),
("lhoestq/demo1", ["default"]),
("dalle-mini/wit", ["default"]),
("datasets-maintainers/audiofolder_no_configs_in_metadata", ["default"]),
("datasets-maintainers/audiofolder_single_config_in_metadata", ["custom"]),
("datasets-maintainers/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert config_names == expected
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["default"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import orjson
from pydantic.json import ENCODERS_BY_TYPE
from docarray.typing.abstract_type import AbstractType
def _default_orjson(obj):
"""
default option for orjson dumps.
    :param obj: the object to make JSON-serializable
    :return: a JSON-compatible object
"""
if isinstance(obj, AbstractType):
return obj._docarray_to_json_compatible()
else:
for cls_, encoder in ENCODERS_BY_TYPE.items():
if isinstance(obj, cls_):
return encoder(obj)
return obj
def orjson_dumps(v, *, default=None) -> bytes:
# dumps to bytes using orjson
return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY)
def orjson_dumps_and_decode(v, *, default=None) -> str:
# dumps to bytes using orjson
return orjson_dumps(v, default=default).decode()
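# Hedged usage sketch (illustration only, not part of the original module).
# numpy is assumed to be installed; OPT_SERIALIZE_NUMPY lets orjson serialize
# the ndarray directly, so the `default` hook is only hit for other types.
if __name__ == '__main__':
    import numpy as np

    payload = {'id': 1, 'embedding': np.arange(3)}
    print(orjson_dumps_and_decode(payload))  # {"id":1,"embedding":[0,1,2]}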
|
import orjson
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def _default_orjson(obj):
"""
default option for orjson dumps.
    :param obj: the object to make JSON-serializable
    :return: a JSON-compatible object
"""
if isinstance(obj, AbstractTensor):
return obj._docarray_to_json_compatible()
else:
return obj
def orjson_dumps(v, *, default=None) -> bytes:
# dumps to bytes using orjson
return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY)
def orjson_dumps_and_decode(v, *, default=None) -> str:
# dumps to bytes using orjson
return orjson_dumps(v, default=default).decode()
|
from markitdown import MarkItDown
from llama_index.core.bridge.pydantic import BaseModel, model_validator
import os
from pathlib import Path
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from typing import Tuple, Optional, Union, List
from typing_extensions import Self
def is_empty(list_it: list) -> Tuple[bool, Optional[list]]:
if len(list_it) == 0:
return True, None
return False, list_it
class ValidFilePath(BaseModel):
file_path: Union[str, Path, List[str], List[Path]]
@model_validator(mode="after")
def validate_file_path(self) -> Self:
if isinstance(self.file_path, str):
if not (Path(self.file_path).is_dir()):
raise ValueError("Directory path does not exist")
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
elif isinstance(self.file_path, Path):
if not self.file_path.is_dir():
raise ValueError("Directory path does not exist")
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
empty, fls = is_empty(self.file_path)
if empty:
raise ValueError("There is no file to parse!")
else:
files = []
if isinstance(fls[0], str):
for fl in fls:
if Path(fl).is_file() and os.path.splitext(fl)[1] in [".docx", ".html", ".xml", ".csv", ".pdf", ".pptx", ".xlsx", ".json", ".zip", ".txt", "", ".md"]:
files.append(fl)
else:
for fl in fls:
if fl.is_file() and os.path.splitext(fl)[1] in [".docx", ".html", ".xml", ".csv", ".pdf", ".pptx", ".xlsx", ".json", ".zip", ".txt", "", ".md"]:
files.append(fl.__str__())
self.file_path = files
return self
class MarkItDownReader(BaseReader):
"""
MarkItDownReader is a document reader that utilizes the MarkItDown parser to convert files or collections of files into Document objects.
Methods
-------
load_data(file_path: str | Path | Iterable[str] | Iterable[Path]) -> List[Document]
Loads and parses a directory (if `file_path` is `str` or `Path`) or a list of files specified by `file_path` using the MarkItDown parser.
Returns a list of Document objects, each containing the text content and metadata such as file path, file type, and content length.
"""
_reader: MarkItDown = MarkItDown()
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "MarkItDownReader"
def load_data(
self,
file_path: Union[str, Path, List[str], List[Path]],
) -> List[Document]:
docs: List[Document] = []
fl_pt = ValidFilePath(file_path=file_path)
fs = fl_pt.file_path
for f in fs:
res = self._reader.convert(f)
docs.append(Document(text=res.text_content, metadata={"file_path": f.__str__(), "file_type": os.path.splitext(f)[1], "content_length": len(res.text_content)}))
return docs
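# Hedged usage sketch (illustration only, not part of the original module).
# "./docs" is a hypothetical directory containing files with supported
# extensions (.md, .pdf, .docx, ...); MarkItDown converts each one to text.
if __name__ == "__main__":
    reader = MarkItDownReader()
    documents = reader.load_data("./docs")
    for doc in documents:
        print(doc.metadata["file_path"], doc.metadata["content_length"])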
|
from markitdown import MarkItDown
from llama_index.core.bridge.pydantic import BaseModel, model_validator
import os
from pathlib import Path
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from typing import Tuple, Optional, Union, List
from typing_extensions import Self
def is_empty(list_it: list) -> Tuple[bool, Optional[list]]:
if len(list_it) == 0:
return True, None
return False, list_it
class ValidFilePath(BaseModel):
file_path: Union[str, Path, List[str], List[Path]]
@model_validator(mode="after")
def validate_file_path(self) -> Self:
if isinstance(self.file_path, str):
if not (Path(self.file_path).is_dir()):
raise ValueError("Directory path does not exist")
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
elif isinstance(self.file_path, Path):
if not self.file_path.is_dir():
raise ValueError("Directory path does not exist")
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
empty, fls = is_empty(self.file_path)
if empty:
raise ValueError("There is no file to parse!")
else:
files = []
if isinstance(fls[0], str):
for fl in fls:
if Path(fl).is_file() and os.path.splitext(fl)[1] in [".docx", ".html", ".xml", ".csv", ".pdf", ".pptx", ".xlsx", ".json", ".zip", ".txt", "", ".md"]:
files.append(fl)
else:
for fl in fls:
if fl.is_file() and os.path.splitext(fl)[1] in [".docx", ".html", ".xml", ".csv", ".pdf", ".pptx", ".xlsx", ".json", ".zip", ".txt", "", ".md"]:
files.append(fl.__str__())
self.file_path = files
return self
class MarkItDownReader(BaseReader):
"""
MarkItDownReader is a document reader that utilizes the MarkItDown parser to convert files or collections of files into Document objects.
Methods:
-------
load_data(file_path: str | Path | Iterable[str] | Iterable[Path]) -> List[Document]
Loads and parses a directory (if `file_path` is `str` or `Path`) or a list of files specified by `file_path` using the MarkItDown parser.
Returns a list of Document objects, each containing the text content and metadata such as file path, file type, and content length.
"""
_reader: MarkItDown = MarkItDown()
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "MarkItDownReader"
def load_data(
self,
file_path: Union[str, Path, List[str], List[Path]],
) -> List[Document]:
docs: List[Document] = []
fl_pt = ValidFilePath(file_path=file_path)
fs = fl_pt.file_path
for f in fs:
res = self._reader.convert(f)
docs.append(Document(text=res.text_content, metadata={"file_path": f.__str__(), "file_type": os.path.splitext(f)[1], "content_length": len(res.text_content)}))
return docs
|
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_csv_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
msg = (
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise ImportError(msg)
msg = f"{name} does not exist"
raise AttributeError(msg)
|
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_csv_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise ImportError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise AttributeError(f"{name} does not exist")
|
_base_ = './mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
|
_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
|
import datetime
import autogpt_libs.auth as autogpt_auth_lib
import fastapi.testclient
import pytest
import pytest_mock
import backend.server.model as server_model
import backend.server.v2.library.model as library_model
from backend.server.v2.library.routes import router as library_router
app = fastapi.FastAPI()
app.include_router(library_router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware():
"""Override auth middleware for testing"""
return {"sub": "test-user-id"}
def override_get_user_id():
"""Override get_user_id for testing"""
return "test-user-id"
app.dependency_overrides[autogpt_auth_lib.auth_middleware] = override_auth_middleware
app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_user_id
@pytest.mark.asyncio
async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
mocked_value = library_model.LibraryAgentResponse(
agents=[
library_model.LibraryAgent(
id="test-agent-1",
agent_id="test-agent-1",
agent_version=1,
name="Test Agent 1",
description="Test Description 1",
image_url=None,
creator_name="Test Creator",
creator_image_url="",
input_schema={"type": "object", "properties": {}},
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=True,
is_latest_version=True,
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
library_model.LibraryAgent(
id="test-agent-2",
agent_id="test-agent-2",
agent_version=1,
name="Test Agent 2",
description="Test Description 2",
image_url=None,
creator_name="Test Creator",
creator_image_url="",
input_schema={"type": "object", "properties": {}},
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=False,
is_latest_version=True,
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
],
pagination=server_model.Pagination(
total_items=2, total_pages=1, current_page=1, page_size=50
),
)
mock_db_call = mocker.patch("backend.server.v2.library.db.list_library_agents")
mock_db_call.return_value = mocked_value
response = client.get("/agents?search_term=test")
assert response.status_code == 200
data = library_model.LibraryAgentResponse.model_validate(response.json())
assert len(data.agents) == 2
assert data.agents[0].agent_id == "test-agent-1"
assert data.agents[0].can_access_graph is True
assert data.agents[1].agent_id == "test-agent-2"
assert data.agents[1].can_access_graph is False
mock_db_call.assert_called_once_with(
user_id="test-user-id",
search_term="test",
sort_by=library_model.LibraryAgentSort.UPDATED_AT,
page=1,
page_size=15,
)
def test_get_library_agents_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.list_library_agents")
mock_db_call.side_effect = Exception("Test error")
response = client.get("/agents?search_term=test")
assert response.status_code == 500
mock_db_call.assert_called_once_with(
user_id="test-user-id",
search_term="test",
sort_by=library_model.LibraryAgentSort.UPDATED_AT,
page=1,
page_size=15,
)
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_success(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.return_value = None
response = client.post("/agents/test-version-id")
assert response.status_code == 201
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.side_effect = Exception("Test error")
response = client.post("/agents/test-version-id")
assert response.status_code == 500
assert response.json()["detail"] == "Failed to add agent to library"
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
|
import datetime
import autogpt_libs.auth as autogpt_auth_lib
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
import backend.server.model as server_model
import backend.server.v2.library.model as library_model
from backend.server.v2.library.routes import router as library_router
app = fastapi.FastAPI()
app.include_router(library_router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware():
"""Override auth middleware for testing"""
return {"sub": "test-user-id"}
def override_get_user_id():
"""Override get_user_id for testing"""
return "test-user-id"
app.dependency_overrides[autogpt_auth_lib.auth_middleware] = override_auth_middleware
app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_user_id
def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
mocked_value = [
library_model.LibraryAgentResponse(
agents=[
library_model.LibraryAgent(
id="test-agent-1",
agent_id="test-agent-1",
agent_version=1,
name="Test Agent 1",
description="Test Description 1",
image_url=None,
creator_name="Test Creator",
creator_image_url="",
input_schema={"type": "object", "properties": {}},
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=True,
is_latest_version=True,
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
library_model.LibraryAgent(
id="test-agent-2",
agent_id="test-agent-2",
agent_version=1,
name="Test Agent 2",
description="Test Description 2",
image_url=None,
creator_name="Test Creator",
creator_image_url="",
input_schema={"type": "object", "properties": {}},
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=False,
is_latest_version=True,
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
],
pagination=server_model.Pagination(
total_items=2, total_pages=1, current_page=1, page_size=50
),
),
]
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.return_value = mocked_value
response = client.get("/agents?search_term=test")
assert response.status_code == 200
data = library_model.LibraryAgentResponse.model_validate(response.json())
assert len(data.agents) == 2
assert data.agents[0].agent_id == "test-agent-1"
assert data.agents[0].can_access_graph is True
assert data.agents[1].agent_id == "test-agent-2"
assert data.agents[1].can_access_graph is False
mock_db_call.assert_called_once_with(
user_id="test-user-id",
search_term="test",
sort_by=library_model.LibraryAgentSort.UPDATED_AT,
page=1,
page_size=15,
)
def test_get_library_agents_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.side_effect = Exception("Test error")
response = client.get("/agents?search_term=test")
assert response.status_code == 500
mock_db_call.assert_called_once_with(
user_id="test-user-id",
search_term="test",
sort_by=library_model.LibraryAgentSort.UPDATED_AT,
page=1,
page_size=15,
)
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_success(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.return_value = None
response = client.post("/agents/test-version-id")
assert response.status_code == 201
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.side_effect = Exception("Test error")
response = client.post("/agents/test-version-id")
assert response.status_code == 500
assert response.json()["detail"] == "Failed to add agent to library"
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
|
from .database import DatabaseManager, DatabaseManagerAsyncClient, DatabaseManagerClient
from .manager import ExecutionManager
from .scheduler import Scheduler
__all__ = [
"DatabaseManager",
"DatabaseManagerClient",
"DatabaseManagerAsyncClient",
"ExecutionManager",
"Scheduler",
]
|
from .database import DatabaseManager, DatabaseManagerClient
from .manager import ExecutionManager
from .scheduler import Scheduler
__all__ = [
"DatabaseManager",
"DatabaseManagerClient",
"ExecutionManager",
"Scheduler",
]
|
from __future__ import annotations
import json
import logging
import os
from typing import Literal
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: list[str],
word_weights: dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super().__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
f"{num_unknown_words} out of {len(vocab)} words without a weighting value. Set weight to {unknown_word_weight}"
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
    def tokenize(self, texts: list[str], **kwargs) -> dict[Literal["sentence_embedding"], torch.Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: list[list[int]], pad_seq_length: int = 0
) -> dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
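# Hedged usage sketch (illustration only, not part of the original module).
# Because of the relative import above, it assumes execution inside the
# sentence_transformers package (e.g. via `python -m ...`); the toy vocabulary
# and sentence are arbitrary.
if __name__ == "__main__":
    bow = BoW(vocab=["the", "cat", "sat"])
    features = bow.tokenize(["the cat sat the"])
    print(features["sentence_embedding"])  # one weighted count per vocab word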
|
from __future__ import annotations
import json
import logging
import os
from typing import Literal
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: list[str],
word_weights: dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
    def tokenize(self, texts: list[str], **kwargs) -> dict[Literal["sentence_embedding"], torch.Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: list[list[int]], pad_seq_length: int = 0
) -> dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
"""langchain-core version information and utilities."""
VERSION = "0.3.62"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.61"
|
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import-untyped]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import IS_PYDANTIC_V1
if IS_PYDANTIC_V1:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore[assignment,misc]
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore[valid-type]
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
if issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
return None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
]
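# Hedged usage sketch (illustration only, not part of the original module):
# parse a plain JSON string and a fenced markdown block via parse_result.
if __name__ == "__main__":
    parser = JsonOutputParser()
    print(parser.parse('{"answer": 42}'))  # {'answer': 42}
    print(parser.parse_result([Generation(text='```json\n{"answer": 42}\n```')]))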
|
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import-untyped]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import IS_PYDANTIC_V1
if IS_PYDANTIC_V1:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore[assignment,misc]
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore[valid-type]
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
if issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
return None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
from llama_index_instrumentation.span.simple import SimpleSpan # noqa
|
from typing import Dict, Optional
from llama_index.core.bridge.pydantic import Field
from llama_index.core.instrumentation.span.base import BaseSpan
from datetime import datetime
class SimpleSpan(BaseSpan):
"""Simple span class."""
start_time: datetime = Field(default_factory=lambda: datetime.now())
end_time: Optional[datetime] = Field(default=None)
duration: float = Field(default=0.0, description="Duration of span in seconds.")
metadata: Optional[Dict] = Field(default=None)
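# A hedged usage sketch (illustrative only; assumes BaseSpan provides `id_` and
# `parent_id` fields, which are not shown in this file): open a span, close it,
# and record the elapsed time in seconds.
if __name__ == "__main__":
    span = SimpleSpan(id_="span-1", parent_id=None)
    span.end_time = datetime.now()
    span.duration = (span.end_time - span.start_time).total_seconds()
    print(span.duration)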
|
import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import load_librispeech_item
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
def _get_fileids_paths(path, subset, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
    The format of the path is like:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
if subset == "10min":
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/0/*/*/*/*" + _ext_audio)
]
elif subset in ["1h", "10h"]:
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/*/*/*/*/*" + _ext_audio)
]
if subset == "10h":
files_paths += [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("9h/*/*/*/*" + _ext_audio)
]
else:
raise ValueError(f"Unsupported subset value. Found {subset}.")
files_paths = sorted(files_paths, key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Create a Dataset for LibriLightLimited, which is the supervised subset of
    the LibriLight dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``10min``, ``1h``, ``10h``]
(Default: ``10min``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in ["10min", "1h", "10h"]:
raise ValueError("`subset` must be one of ['10min', '1h', '10h']")
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, subset, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)``
"""
file_path, fileid = self._fileids_paths[n]
return load_librispeech_item(fileid, file_path, self._ext_audio, self._ext_txt)
def __len__(self) -> int:
return len(self._fileids_paths)
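# A minimal usage sketch (the "./data" root is an assumed path, not part of the
# dataset definition): download the 10-minute subset and read the first sample.
if __name__ == "__main__":
    dataset = LibriLightLimited("./data", subset="10min", download=True)
    waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id = dataset[0]
    print(waveform.shape, sample_rate, transcript)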
|
import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import load_librispeech_item
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
def _get_fileids_paths(path, subset, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
    The format of the path is like:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
if subset == "10min":
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/0/*/*/*/*" + _ext_audio)
]
elif subset in ["1h", "10h"]:
files_paths = [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("1h/*/*/*/*/*" + _ext_audio)
]
if subset == "10h":
files_paths += [
(os.path.join(os.path.dirname(p), "..", ".."), str(p.stem))
for p in Path(path).glob("9h/*/*/*/*" + _ext_audio)
]
else:
raise ValueError(f"Unsupported subset value. Found {subset}.")
files_paths = sorted(files_paths, key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Create a Dataset for LibriLightLimited, which is the supervised subset of
    the LibriLight dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``10min``, ``1h``, ``10h``]
(Default: ``10min``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
assert subset in ["10min", "1h", "10h"], "`subset` must be one of ['10min', '1h', '10h']"
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, subset, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)``
"""
file_path, fileid = self._fileids_paths[n]
return load_librispeech_item(fileid, file_path, self._ext_audio, self._ext_txt)
def __len__(self) -> int:
return len(self._fileids_paths)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from torch import Tensor
from mmdet.core.utils import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes: int,
num_ins: int,
in_channels: int,
stacked_convs: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for j in range(self.stacked_convs):
chn = self.in_channels if j == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def init_weights(self) -> None:
"""Initialize weights of the head."""
super().init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats: Tuple[Tensor]) -> tuple:
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = feats[i]
reg_feat = feats[i]
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
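# A standalone sketch of the sharing scheme described above (it does not build
# RetinaSepBNHead itself, whose constructor needs a full AnchorHead config):
# one conv is shared across FPN levels while each level keeps its own BN.
if __name__ == "__main__":
    import torch
    channels, num_levels = 8, 2
    shared_conv = nn.Conv2d(channels, channels, 3, padding=1)
    per_level_bn = nn.ModuleList(nn.BatchNorm2d(channels) for _ in range(num_levels))
    feats = [torch.rand(1, channels, 16, 16), torch.rand(1, channels, 8, 8)]
    outs = [bn(shared_conv(x)) for bn, x in zip(per_level_bn, feats)]
    print([tuple(o.shape) for o in outs])  # [(1, 8, 16, 16), (1, 8, 8, 8)]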
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmdet.registry import MODELS
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes,
num_ins,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super(RetinaSepBNHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
            for j in range(self.stacked_convs):
                chn = self.in_channels if j == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
super(RetinaSepBNHead, self).init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = feats[i]
reg_feat = feats[i]
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
|
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
xor_args,
)
__all__ = [
"build_extra_kwargs",
"check_package_version",
"convert_to_secret_str",
"get_pydantic_field_names",
"guard_import",
"mock_now",
"raise_for_status_with_text",
"xor_args",
]
|
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
xor_args,
)
__all__ = [
"xor_args",
"raise_for_status_with_text",
"mock_now",
"guard_import",
"check_package_version",
"get_pydantic_field_names",
"build_extra_kwargs",
"convert_to_secret_str",
]
|
import os
import pytest
import torch
import whisper
@pytest.mark.parametrize("model_name", whisper.available_models())
def test_transcribe(model_name: str):
device = "cuda" if torch.cuda.is_available() else "cpu"
model = whisper.load_model(model_name).to(device)
audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac")
language = "en" if model_name.endswith(".en") else None
result = model.transcribe(
audio_path, language=language, temperature=0.0, word_timestamps=True
)
assert result["language"] == "en"
transcription = result["text"].lower()
assert "my fellow americans" in transcription
assert "your country" in transcription
assert "do for you" in transcription
timing_checked = False
for segment in result["segments"]:
for timing in segment["words"]:
assert timing["start"] < timing["end"]
if timing["word"].strip(" ,") == "Americans":
assert timing["start"] <= 1.8
assert timing["end"] >= 1.8
print(timing)
timing_checked = True
assert timing_checked
|
import os
import pytest
import torch
import whisper
@pytest.mark.parametrize("model_name", whisper.available_models())
def test_transcribe(model_name: str):
device = "cuda" if torch.cuda.is_available() else "cpu"
model = whisper.load_model(model_name).to(device)
audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac")
language = "en" if model_name.endswith(".en") else None
result = model.transcribe(audio_path, language=language, temperature=0.0, word_timestamps=True)
assert result["language"] == "en"
transcription = result["text"].lower()
assert "my fellow americans" in transcription
assert "your country" in transcription
assert "do for you" in transcription
timing_checked = False
for segment in result["segments"]:
for timing in segment["words"]:
assert timing["start"] < timing["end"]
if timing["word"].strip(" ,") == "Americans":
assert timing["start"] <= 1.8
assert timing["end"] >= 1.8
print(timing)
timing_checked = True
assert timing_checked
|
import torch
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
def __init__(self, to_restore):
self.to_restore = to_restore
def __enter__(self):
return self
def __exit__(self, *args):
global _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
"""[BETA] Set the return type of torch operations on :class:`~torchvision.tv_tensors.TVTensor`.
This only affects the behaviour of torch operations. It has no effect on
``torchvision`` transforms or functionals, which will always return as
output the same type that was passed as input.
.. warning::
We recommend using :class:`~torchvision.transforms.v2.ToPureTensor` at
the end of your transform pipelines if you use
``set_return_type("TVTensor")``. This will avoid the
``__torch_function__`` overhead in the models ``forward()``.
Can be used as a global flag for the entire program:
.. code:: python
img = tv_tensors.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor (default behaviour)
set_return_type("TVTensor")
img + 2 # This is an Image
or as a context manager to restrict the scope:
.. code:: python
img = tv_tensors.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor
with set_return_type("TVTensor"):
img + 2 # This is an Image
img + 2 # This is a pure Tensor
Args:
return_type (str): Can be "TVTensor" or "Tensor" (case-insensitive).
Default is "Tensor" (i.e. pure :class:`torch.Tensor`).
"""
global _TORCHFUNCTION_SUBCLASS
to_restore = _TORCHFUNCTION_SUBCLASS
try:
_TORCHFUNCTION_SUBCLASS = {"tensor": False, "tvtensor": True}[return_type.lower()]
except KeyError:
raise ValueError(f"return_type must be 'TVTensor' or 'Tensor', got {return_type}") from None
return _ReturnTypeCM(to_restore)
def _must_return_subclass():
return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
|
import torch
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
def __init__(self, to_restore):
self.to_restore = to_restore
def __enter__(self):
return self
def __exit__(self, *args):
global _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
"""[BETA] Set the return type of torch operations on tv_tensors.
This only affects the behaviour of torch operations. It has no effect on
``torchvision`` transforms or functionals, which will always return as
output the same type that was passed as input.
.. warning::
We recommend using :class:`~torchvision.transforms.v2.ToPureTensor` at
the end of your transform pipelines if you use
``set_return_type("dataptoint")``. This will avoid the
``__torch_function__`` overhead in the models ``forward()``.
Can be used as a global flag for the entire program:
.. code:: python
img = tv_tensors.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor (default behaviour)
set_return_type("tv_tensors")
img + 2 # This is an Image
or as a context manager to restrict the scope:
.. code:: python
img = tv_tensors.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor
with set_return_type("tv_tensors"):
img + 2 # This is an Image
img + 2 # This is a pure Tensor
Args:
return_type (str): Can be "tv_tensor" or "tensor". Default is "tensor".
"""
global _TORCHFUNCTION_SUBCLASS
to_restore = _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = {"tensor": False, "tv_tensor": True}[return_type.lower()]
return _ReturnTypeCM(to_restore)
def _must_return_subclass():
return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData, PixelData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
PixelList = List[PixelData]
OptPixelList = Optional[PixelList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
OptSamplingResultList = Optional[SamplingResultList]
ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample],
Tuple[torch.Tensor], torch.Tensor]
RangeType = Sequence[Tuple[int, int]]
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import Dict, List, Optional, Tuple, Union
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData, PixelData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
PixelList = List[PixelData]
OptPixelList = Optional[PixelList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
OptSamplingResultList = Optional[SamplingResultList]
ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample],
Tuple[torch.Tensor], torch.Tensor]
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VAN model configuration"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
class VanConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VanModel`]. It is used to instantiate a VAN model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the VAN
[Visual-Attention-Network/van-base](https://huggingface.co/Visual-Attention-Network/van-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size to use in each stage's embedding layer.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride size to use in each stage's embedding layer to downsample the input.
hidden_sizes (`list[int]`, *optional*, defaults to `[64, 128, 320, 512]`):
Dimensionality (hidden size) at each stage.
depths (`list[int]`, *optional*, defaults to `[3, 3, 12, 3]`):
Depth (number of layers) for each stage.
mlp_ratios (`list[int]`, *optional*, defaults to `[8, 8, 4, 4]`):
The expansion ratio for mlp layer at each stage.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in each layer. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
layer_scale_init_value (`float`, *optional*, defaults to 0.01):
The initial value for layer scaling.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The dropout probability for stochastic depth.
dropout_rate (`float`, *optional*, defaults to 0.0):
The dropout probability for dropout.
Example:
```python
>>> from transformers import VanModel, VanConfig
>>> # Initializing a VAN van-base style configuration
>>> configuration = VanConfig()
>>> # Initializing a model from the van-base style configuration
>>> model = VanModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "van"
def __init__(
self,
image_size=224,
num_channels=3,
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
hidden_sizes=[64, 128, 320, 512],
depths=[3, 3, 12, 3],
mlp_ratios=[8, 8, 4, 4],
hidden_act="gelu",
initializer_range=0.02,
layer_norm_eps=1e-6,
layer_scale_init_value=1e-2,
drop_path_rate=0.0,
dropout_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.num_channels = num_channels
self.patch_sizes = patch_sizes
self.strides = strides
self.hidden_sizes = hidden_sizes
self.depths = depths
self.mlp_ratios = mlp_ratios
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.dropout_rate = dropout_rate
__all__ = ["VanConfig"]
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VAN model configuration"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
class VanConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VanModel`]. It is used to instantiate a VAN model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the VAN
[Visual-Attention-Network/van-base](https://huggingface.co/Visual-Attention-Network/van-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size to use in each stage's embedding layer.
strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride size to use in each stage's embedding layer to downsample the input.
hidden_sizes (`List[int]`, *optional*, defaults to `[64, 128, 320, 512]`):
Dimensionality (hidden size) at each stage.
depths (`List[int]`, *optional*, defaults to `[3, 3, 12, 3]`):
Depth (number of layers) for each stage.
mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`):
The expansion ratio for mlp layer at each stage.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in each layer. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
layer_scale_init_value (`float`, *optional*, defaults to 0.01):
The initial value for layer scaling.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The dropout probability for stochastic depth.
dropout_rate (`float`, *optional*, defaults to 0.0):
The dropout probability for dropout.
Example:
```python
>>> from transformers import VanModel, VanConfig
>>> # Initializing a VAN van-base style configuration
>>> configuration = VanConfig()
>>> # Initializing a model from the van-base style configuration
>>> model = VanModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "van"
def __init__(
self,
image_size=224,
num_channels=3,
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
hidden_sizes=[64, 128, 320, 512],
depths=[3, 3, 12, 3],
mlp_ratios=[8, 8, 4, 4],
hidden_act="gelu",
initializer_range=0.02,
layer_norm_eps=1e-6,
layer_scale_init_value=1e-2,
drop_path_rate=0.0,
dropout_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.num_channels = num_channels
self.patch_sizes = patch_sizes
self.strides = strides
self.hidden_sizes = hidden_sizes
self.depths = depths
self.mlp_ratios = mlp_ratios
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.dropout_rate = dropout_rate
__all__ = ["VanConfig"]
|
"""String utilities."""
from typing import Any
def stringify_value(val: Any) -> str:
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
elif isinstance(val, dict):
return "\n" + stringify_dict(val)
elif isinstance(val, list):
return "\n".join(stringify_value(v) for v in val)
else:
return str(val)
def stringify_dict(data: dict) -> str:
"""Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary.
"""
text = ""
for key, value in data.items():
text += key + ": " + stringify_value(value) + "\n"
return text
def comma_list(items: list[Any]) -> str:
"""Convert a list to a comma-separated string.
Args:
items: The list to convert.
Returns:
str: The comma-separated string.
"""
return ", ".join(str(item) for item in items)
|
from typing import Any
def stringify_value(val: Any) -> str:
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
elif isinstance(val, dict):
return "\n" + stringify_dict(val)
elif isinstance(val, list):
return "\n".join(stringify_value(v) for v in val)
else:
return str(val)
def stringify_dict(data: dict) -> str:
"""Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary.
"""
text = ""
for key, value in data.items():
text += key + ": " + stringify_value(value) + "\n"
return text
def comma_list(items: list[Any]) -> str:
"""Convert a list to a comma-separated string.
Args:
items: The list to convert.
Returns:
str: The comma-separated string.
"""
return ", ".join(str(item) for item in items)
|
import abc
import io
import pathlib
import pickle
from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from .._api import register_dataset, register_info
class CifarFileReader(IterDataPipe[Tuple[np.ndarray, int]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]], *, labels_key: str) -> None:
self.datapipe = datapipe
self.labels_key = labels_key
def __iter__(self) -> Iterator[Tuple[np.ndarray, int]]:
for mapping in self.datapipe:
image_arrays = mapping["data"].reshape((-1, 3, 32, 32))
category_idcs = mapping[self.labels_key]
yield from iter(zip(image_arrays, category_idcs))
class _CifarBase(Dataset):
_FILE_NAME: str
_SHA256: str
_LABELS_KEY: str
_META_FILE_NAME: str
_CATEGORIES_KEY: str
_categories: List[str]
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
@abc.abstractmethod
def _is_data_file(self, data: Tuple[str, BinaryIO]) -> Optional[int]:
pass
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
f"https://www.cs.toronto.edu/~kriz/{self._FILE_NAME}",
sha256=self._SHA256,
)
]
def _unpickle(self, data: Tuple[str, io.BytesIO]) -> Dict[str, Any]:
_, file = data
content = cast(Dict[str, Any], pickle.load(file, encoding="latin1"))
file.close()
return content
def _prepare_sample(self, data: Tuple[np.ndarray, int]) -> Dict[str, Any]:
image_array, category_idx = data
return dict(
image=Image(image_array),
label=Label(category_idx, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, self._is_data_file)
dp = Mapper(dp, self._unpickle)
dp = CifarFileReader(dp, labels_key=self._LABELS_KEY)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 50_000 if self._split == "train" else 10_000
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", self._META_FILE_NAME))
dp = Mapper(dp, self._unpickle)
return cast(List[str], next(iter(dp))[self._CATEGORIES_KEY])
@register_info("cifar10")
def _cifar10_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("cifar10"))
@register_dataset("cifar10")
class Cifar10(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-10-python.tar.gz"
_SHA256 = "6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
_LABELS_KEY = "labels"
_META_FILE_NAME = "batches.meta"
_CATEGORIES_KEY = "label_names"
_categories = _cifar10_info()["categories"]
def _is_data_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name.startswith("data" if self._split == "train" else "test")
@register_info("cifar100")
def _cifar100_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("cifar100"))
@register_dataset("cifar100")
class Cifar100(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-100-python.tar.gz"
_SHA256 = "85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
_LABELS_KEY = "fine_labels"
_META_FILE_NAME = "meta"
_CATEGORIES_KEY = "fine_label_names"
_categories = _cifar100_info()["categories"]
def _is_data_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name == self._split
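# A standalone sketch of the reshape performed in CifarFileReader (the toy batch
# below is fabricated for illustration, not real CIFAR data): each unpickled
# batch stores images as flat rows of 3 * 32 * 32 = 3072 bytes that are viewed
# as channel-first (N, 3, 32, 32) arrays.
if __name__ == "__main__":
    fake_batch = {"data": np.zeros((4, 3072), dtype=np.uint8), "labels": [0, 1, 2, 3]}
    images = fake_batch["data"].reshape((-1, 3, 32, 32))
    print(images.shape)  # (4, 3, 32, 32)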
|
import abc
import io
import pathlib
import pickle
from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from .._api import register_dataset, register_info
class CifarFileReader(IterDataPipe[Tuple[np.ndarray, int]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]], *, labels_key: str) -> None:
self.datapipe = datapipe
self.labels_key = labels_key
def __iter__(self) -> Iterator[Tuple[np.ndarray, int]]:
for mapping in self.datapipe:
image_arrays = mapping["data"].reshape((-1, 3, 32, 32))
category_idcs = mapping[self.labels_key]
yield from iter(zip(image_arrays, category_idcs))
class _CifarBase(Dataset):
_FILE_NAME: str
_SHA256: str
_LABELS_KEY: str
_META_FILE_NAME: str
_CATEGORIES_KEY: str
_categories: List[str]
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
@abc.abstractmethod
def _is_data_file(self, data: Tuple[str, BinaryIO]) -> Optional[int]:
pass
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
f"https://www.cs.toronto.edu/~kriz/{self._FILE_NAME}",
sha256=self._SHA256,
)
]
def _unpickle(self, data: Tuple[str, io.BytesIO]) -> Dict[str, Any]:
_, file = data
content = cast(Dict[str, Any], pickle.load(file, encoding="latin1"))
file.close()
return content
def _prepare_sample(self, data: Tuple[np.ndarray, int]) -> Dict[str, Any]:
image_array, category_idx = data
return dict(
image=Image(image_array),
label=Label(category_idx, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, self._is_data_file)
dp = Mapper(dp, self._unpickle)
dp = CifarFileReader(dp, labels_key=self._LABELS_KEY)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 50_000 if self._split == "train" else 10_000
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", self._META_FILE_NAME))
dp = Mapper(dp, self._unpickle)
return cast(List[str], next(iter(dp))[self._CATEGORIES_KEY])
@register_info("cifar10")
def _cifar10_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("cifar10"))
@register_dataset("cifar10")
class Cifar10(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-10-python.tar.gz"
_SHA256 = "6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
_LABELS_KEY = "labels"
_META_FILE_NAME = "batches.meta"
_CATEGORIES_KEY = "label_names"
_categories = _cifar10_info()["categories"]
def _is_data_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name.startswith("data" if self._split == "train" else "test")
@register_info("cifar100")
def _cifar100_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("cifar100"))
@register_dataset("cifar100")
class Cifar100(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-100-python.tar.gz"
_SHA256 = "85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
_LABELS_KEY = "fine_labels"
_META_FILE_NAME = "meta"
_CATEGORIES_KEY = "fine_label_names"
_categories = _cifar100_info()["categories"]
def _is_data_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name == self._split
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.3.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
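# A small usage sketch: plain releases parse to integer tuples, while release
# candidates keep an 'rcN' suffix element.
if __name__ == "__main__":
    print(parse_version_info('0.3.0'))     # (0, 3, 0)
    print(parse_version_info('1.0.0rc2'))  # (1, 0, 0, 'rc2')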
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.2.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_CPU_VIDEO_DECODER,
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_avif,
decode_gif,
decode_heic,
decode_image,
decode_jpeg,
decode_png,
decode_webp,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_CPU_VIDEO_DECODER",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"decode_avif",
"decode_heic",
"decode_webp",
"decode_gif",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_CPU_VIDEO_DECODER,
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_gif,
decode_image,
decode_jpeg,
decode_png,
decode_webp,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_CPU_VIDEO_DECODER",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"decode_heic",
"decode_webp",
"decode_gif",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
from .._internally_replaced_utils import IN_FBCODE
if IN_FBCODE:
from .image import _decode_avif as decode_avif, _decode_heic as decode_heic
__all__ += ["decode_avif", "decode_heic"]
|
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
logger = logging.getLogger(__name__)
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
Includes enhanced error handling and process lifecycle management.
"""
try:
# Run all processes except the last one in the background.
for process in processes[:-1]:
process.start(background=True, **kwargs)
# Run the last process in the foreground.
processes[-1].start(background=False, **kwargs)
finally:
for process in processes:
try:
process.stop()
except Exception as e:
logger.exception(f"[{process.service_name}] unable to stop: {e}")
def main(**kwargs):
"""
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
from backend.notifications import NotificationManager
from backend.server.rest_api import AgentServer
from backend.server.ws_api import WebsocketServer
run_processes(
DatabaseManager(),
ExecutionManager(),
ExecutionScheduler(),
NotificationManager(),
WebsocketServer(),
AgentServer(),
**kwargs,
)
if __name__ == "__main__":
main()
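# A hedged usage sketch (DummyProcess is hypothetical, not part of
# backend.util.process): run_processes starts every process but the last with
# background=True, runs the last in the foreground, and stops them all in the
# finally block.
#
#     class DummyProcess:
#         service_name = "dummy"
#         def start(self, background: bool = False, **kwargs): ...
#         def stop(self): ...
#
#     run_processes(DummyProcess(), DummyProcess())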
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
"""
try:
for process in processes[:-1]:
process.start(background=True, **kwargs)
# Run the last process in the foreground
processes[-1].start(background=False, **kwargs)
finally:
for process in processes:
process.stop()
def main(**kwargs):
"""
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
from backend.notifications import NotificationManager
from backend.server.rest_api import AgentServer
from backend.server.ws_api import WebsocketServer
run_processes(
DatabaseManager(),
ExecutionManager(),
ExecutionScheduler(),
NotificationManager(),
WebsocketServer(),
AgentServer(),
**kwargs,
)
if __name__ == "__main__":
main()
|
import os
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
_MIXTURE_FILE = "mixture_3729-6852-0037_8463-287645-0000.wav"
_CLEAN_FILES = [
"s1_3729-6852-0037_8463-287645-0000.wav",
"s2_3729-6852-0037_8463-287645-0000.wav",
]
@pytest.fixture
def sample_speech(tmp_path, lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = tmp_path.parent / filename
if not path.exists():
torchaudio.utils.download_asset(f"test-assets/{filename}", path=path)
return path
@pytest.fixture
def mixture_source():
path = torchaudio.utils.download_asset(os.path.join("test-assets", f"{_MIXTURE_FILE}"))
return path
@pytest.fixture
def clean_sources():
paths = []
for file in _CLEAN_FILES:
path = torchaudio.utils.download_asset(os.path.join("test-assets", f"{file}"))
paths.append(path)
return paths
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmpdir, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
torch.hub.set_dir(tmpdir)
yield
torch.hub.set_dir(org_dir)
@pytest.fixture()
def emissions():
path = torchaudio.utils.download_asset("test-assets/emissions-8555-28447-0012.pt")
return torch.load(path)
|
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
@pytest.fixture
def sample_speech(tmp_path, lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = tmp_path.parent / filename
if not path.exists():
torchaudio.utils.download_asset(f"test-assets/{filename}", path=path)
return path
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmpdir, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
torch.hub.set_dir(tmpdir)
yield
torch.hub.set_dir(org_dir)
@pytest.fixture()
def emissions():
path = torchaudio.utils.download_asset("test-assets/emissions-8555-28447-0012.pt")
return torch.load(path)
|
from workflows.errors import (
ContextSerdeError, # noqa
WorkflowCancelledByUser, # noqa
WorkflowConfigurationError, # noqa
WorkflowDone, # noqa
WorkflowRuntimeError, # noqa
WorkflowStepDoesNotExistError, # noqa
WorkflowTimeoutError, # noqa
WorkflowValidationError, # noqa
)
|
class WorkflowValidationError(Exception):
pass
class WorkflowTimeoutError(Exception):
pass
class WorkflowRuntimeError(Exception):
pass
class WorkflowDone(Exception):
pass
class WorkflowCancelledByUser(Exception):
pass
class WorkflowStepDoesNotExistError(Exception):
pass
class WorkflowConfigurationError(Exception):
pass
class ContextSerdeError(Exception):
pass
|
import asyncio
import sys
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
def test_smoke():
"""No-op test: CI will fail if no tests are collected."""
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher"
)
@pytest.mark.asyncio()
async def test_add_data(monkeypatch):
# Instantiate cognee GraphRAG
cogneeGraphRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
async def mock_add_return(add, dataset_name):
return True
import cognee
monkeypatch.setattr(cognee, "add", mock_add_return)
# Gather documents to add to GraphRAG
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
await cogneeGraphRAG.add(documents, "test")
await cogneeGraphRAG.add(documents[0], "test")
if __name__ == "__main__":
    asyncio.run(test_add_data(pytest.MonkeyPatch()))
|
from llama_index.core import Document
import asyncio
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.asyncio()
async def test_add_data(monkeypatch):
# Instantiate cognee GraphRAG
cogneeGraphRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
async def mock_add_return(add, dataset_name):
return True
import cognee
monkeypatch.setattr(cognee, "add", mock_add_return)
# Gather documents to add to GraphRAG
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
await cogneeGraphRAG.add(documents, "test")
await cogneeGraphRAG.add(documents[0], "test")
if __name__ == "__main__":
    asyncio.run(test_add_data(pytest.MonkeyPatch()))
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.12.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.11.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
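if __name__ == "__main__":
    # Illustrative sketch (added; not part of the original file): erase() dispatches to the
    # kernel registered for the input type, here the plain-tensor kernel. The values below
    # are arbitrary and only show the (i, j, h, w, v) convention.
    img = torch.rand(3, 64, 64)
    patch = torch.zeros(3, 20, 30)
    out = erase(img, i=10, j=5, h=20, w=30, v=patch)
    print(out[..., 10:30, 5:35].abs().sum())  # tensor(0.) -- the region was replaced by v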
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
if torch.jit.is_scripting():
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import import_library
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDoc):
"""
Document for handling the tensor data of a [`Mesh3D`][docarray.documents.mesh.Mesh3D] object.
A VerticesAndFaces Document can contain:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the vertices information (`VerticesAndFaces.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the faces information (`VerticesAndFaces.faces`)
"""
vertices: AnyTensor
faces: AnyTensor
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
return super().validate(value)
def display(self) -> None:
"""
Plot mesh consisting of vertices and faces.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
if self.vertices is None or self.faces is None:
raise ValueError(
'Can\'t display mesh from tensors when the vertices and/or faces '
'are None.'
)
mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces)
display(mesh.show())
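if __name__ == "__main__":
    # Illustrative sketch (added for clarity; not part of the original module): build a
    # single-triangle mesh document from plain numpy arrays, which AnyTensor accepts.
    import numpy as np
    tensors = VerticesAndFaces(
        vertices=np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
        faces=np.array([[0, 1, 2]]),
    )
    print(tensors.vertices.shape, tensors.faces.shape)
    # tensors.display() additionally requires the optional `trimesh` and IPython dependencies.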
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import import_library
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDoc):
"""
Document for handling 3D mesh tensor data.
A VerticesAndFaces Document can contain an AnyTensor containing the vertices
information (`VerticesAndFaces.vertices`), and an AnyTensor containing the faces
information (`VerticesAndFaces.faces`).
"""
vertices: AnyTensor
faces: AnyTensor
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
return super().validate(value)
def display(self) -> None:
"""
Plot mesh consisting of vertices and faces.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
if self.vertices is None or self.faces is None:
raise ValueError(
'Can\'t display mesh from tensors when the vertices and/or faces '
'are None.'
)
mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces)
display(mesh.show())
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_data_samples)
return losses
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
return losses
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import psutil
vm = psutil.virtual_memory()
total_gb = vm.total / (1024**3)
available_gb = vm.available / (1024**3)
print(f"Total RAM: {total_gb:.2f} GB")
print(f"Available RAM: {available_gb:.2f} GB")
except ImportError:
pass
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
if torch.cuda.is_available():
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
device_properties = torch.cuda.get_device_properties(0)
total_memory = device_properties.total_memory / (1024**3)
print(f"CUDA memory: {total_memory} GB")
print("XPU available:", hasattr(torch, "xpu") and torch.xpu.is_available())
if hasattr(torch, "xpu") and torch.xpu.is_available():
print("XPU model:", torch.xpu.get_device_properties(0).name)
print("XPU compiler version:", torch.version.xpu)
print("Number of XPUs available:", torch.xpu.device_count())
device_properties = torch.xpu.get_device_properties(0)
total_memory = device_properties.total_memory / (1024**3)
print(f"XPU memory: {total_memory} GB")
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
if torch.cuda.is_available():
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
device_properties = torch.cuda.get_device_properties(0)
total_memory = device_properties.total_memory / (1024**3)
print(f"CUDA memory: {total_memory} GB")
print("XPU available:", hasattr(torch, "xpu") and torch.xpu.is_available())
if hasattr(torch, "xpu") and torch.xpu.is_available():
print("XPU model:", torch.xpu.get_device_properties(0).name)
print("XPU compiler version:", torch.version.xpu)
print("Number of XPUs available:", torch.xpu.device_count())
device_properties = torch.xpu.get_device_properties(0)
total_memory = device_properties.total_memory / (1024**3)
print(f"XPU memory: {total_memory} GB")
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
|
from pathlib import Path
from typing import List
import pytest
from dpr_text import DPRTextEncoder
from jina import Document, DocumentArray, Executor
_EMBEDDING_DIM = 768
@pytest.fixture(scope='session')
def basic_encoder() -> DPRTextEncoder:
return DPRTextEncoder()
@pytest.fixture(scope='session')
def basic_encoder_ctx() -> DPRTextEncoder:
return DPRTextEncoder(
'facebook/dpr-ctx_encoder-single-nq-base',
encoder_type='context',
title_tag_key='title',
)
def test_config():
encoder = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert encoder.encoder_type == 'question'
def test_no_document(basic_encoder: DPRTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_context_encoder_doc_no_title(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
with pytest.raises(ValueError, match='If you set `title_tag_key` property'):
basic_encoder_ctx.encode(docs, {})
def test_wrong_encoder_type():
with pytest.raises(ValueError, match='The ``encoder_type`` parameter'):
        DPRTextEncoder(encoder_type='wrong_type')
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
def test_encoding_question_type(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
def test_encoding_context_type(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there', tags={'title': 'greeting'})])
basic_encoder_ctx.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: DPRTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: DPRTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: DPRTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
def test_ctx_encoder_with_incorrect_model():
with pytest.raises(
ValueError,
match='Please ensure that pretrained_model_name_or_path is correctly set',
):
DPRTextEncoder(encoder_type='context')
|
from pathlib import Path
from typing import List
import pytest
from dpr_text import DPRTextEncoder
from jina import Document, DocumentArray, Executor
_EMBEDDING_DIM = 768
@pytest.fixture(scope='session')
def basic_encoder() -> DPRTextEncoder:
return DPRTextEncoder()
@pytest.fixture(scope='session')
def basic_encoder_ctx() -> DPRTextEncoder:
return DPRTextEncoder(
'facebook/dpr-ctx_encoder-single-nq-base',
encoder_type='context',
title_tag_key='title',
)
def test_config():
encoder = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert encoder.encoder_type == 'question'
def test_no_document(basic_encoder: DPRTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_context_encoder_doc_no_title(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
with pytest.raises(ValueError, match='If you set `title_tag_key` property'):
basic_encoder_ctx.encode(docs, {})
def test_wrong_encoder_type():
with pytest.raises(ValueError, match='The ``encoder_type`` parameter'):
        DPRTextEncoder(encoder_type='wrong_type')
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
def test_encoding_question_type(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
def test_encoding_context_type(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there', tags={'title': 'greeting'})])
basic_encoder_ctx.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: DPRTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: DPRTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: DPRTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.src.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
@property
def name(self):
return self._backend
def __getattr__(self, name):
if self._backend == "tensorflow":
from keras.src.backend import tensorflow as tf_backend
return getattr(tf_backend, name)
if self._backend == "jax":
from keras.src.backend import jax as jax_backend
return getattr(jax_backend, name)
if self._backend == "torch":
from keras.src.backend import torch as torch_backend
return getattr(torch_backend, name)
if self._backend == "numpy":
# TODO (ariG23498):
# The import `from keras.src.backend import numpy as numpy_backend`
# is not working. This is a temporary fix.
# The import is redirected to `keras.backend.numpy.numpy.py`
from keras.src import backend as numpy_backend
return getattr(numpy_backend, name)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
```python
keras.config.set_backend("jax")
```
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
    to keep around **any** Keras-originated object instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()`.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
|
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.src.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
def __getattr__(self, name):
if self._backend == "tensorflow":
from keras.src.backend import tensorflow as tf_backend
return getattr(tf_backend, name)
if self._backend == "jax":
from keras.src.backend import jax as jax_backend
return getattr(jax_backend, name)
if self._backend == "torch":
from keras.src.backend import torch as torch_backend
return getattr(torch_backend, name)
if self._backend == "numpy":
# TODO (ariG23498):
# The import `from keras.src.backend import numpy as numpy_backend`
# is not working. This is a temporary fix.
# The import is redirected to `keras.backend.numpy.numpy.py`
from keras.src import backend as numpy_backend
return getattr(numpy_backend, name)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
```python
keras.config.set_backend("jax")
```
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
    to keep around **any** Keras-originated object instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()`.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
|
import pytest
from .utils import remove_color_codes
@pytest.mark.parametrize(
"raw_text, clean_text",
[
(
"COMMAND = \x1b[36mbrowse_website\x1b[0m "
"ARGUMENTS = \x1b[36m{'url': 'https://www.google.com',"
" 'question': 'What is the capital of France?'}\x1b[0m",
"COMMAND = browse_website "
"ARGUMENTS = {'url': 'https://www.google.com',"
" 'question': 'What is the capital of France?'}",
),
(
"{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': "
"'https://github.com/Significant-Gravitas/AutoGPT,"
" https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
"{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': "
"'https://github.com/Significant-Gravitas/AutoGPT,"
" https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
),
("", ""),
("hello", "hello"),
("hello\x1b[31m world", "hello world"),
("\x1b[36mHello,\x1b[32m World!", "Hello, World!"),
(
"\x1b[1m\x1b[31mError:\x1b[0m\x1b[31m file not found",
"Error: file not found",
),
],
)
def test_remove_color_codes(raw_text, clean_text):
assert remove_color_codes(raw_text) == clean_text
|
import pytest
from .utils import remove_color_codes
@pytest.mark.parametrize(
"raw_text, clean_text",
[
(
"COMMAND = \x1b[36mbrowse_website\x1b[0m "
"ARGUMENTS = \x1b[36m{'url': 'https://www.google.com',"
" 'question': 'What is the capital of France?'}\x1b[0m",
"COMMAND = browse_website "
"ARGUMENTS = {'url': 'https://www.google.com',"
" 'question': 'What is the capital of France?'}",
),
(
"{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': "
"'https://github.com/Significant-Gravitas/AutoGPT,"
" https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
"{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': "
"'https://github.com/Significant-Gravitas/AutoGPT,"
" https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}",
),
("", ""),
("hello", "hello"),
("hello\x1B[31m world", "hello world"),
("\x1B[36mHello,\x1B[32m World!", "Hello, World!"),
(
"\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found",
"Error: file not found",
),
],
)
def test_remove_color_codes(raw_text, clean_text):
assert remove_color_codes(raw_text) == clean_text
|
import inspect
import threading
from typing import Any, Awaitable, Callable, ParamSpec, TypeVar, cast, overload
P = ParamSpec("P")
R = TypeVar("R")
@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
def thread_cached(
func: Callable[P, R] | Callable[P, Awaitable[R]],
) -> Callable[P, R] | Callable[P, Awaitable[R]]:
thread_local = threading.local()
if inspect.iscoroutinefunction(func):
async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (func, args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = await cast(Callable[P, Awaitable[R]], func)(
*args, **kwargs
)
return cache[key]
        # Expose the per-function store so clear_thread_cache can find this cache.
        async_wrapper._thread_local = thread_local
        return async_wrapper
else:
def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
# Include function in the key to prevent collisions between different functions
key = (func, args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
        # Expose the per-function store so clear_thread_cache can find this cache.
        sync_wrapper._thread_local = thread_local
        return sync_wrapper
def clear_thread_cache(func: Callable[..., Any]) -> None:
    """Clear the calling thread's cache for a thread-cached function.
    Expects the wrapper returned by `thread_cached`, which exposes its per-function
    cache store via the `_thread_local` attribute.
    """
    thread_local = getattr(func, "_thread_local", None)
    cache = getattr(thread_local, "cache", None)
    if cache is not None:
        cache.clear()
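if __name__ == "__main__":
    # Illustrative usage sketch (added; not part of the original module): the decorator
    # caches per thread and per argument tuple, for both sync and async callables.
    import asyncio
    calls: list[int] = []
    @thread_cached
    def add_one(x: int) -> int:
        calls.append(x)
        return x + 1
    assert add_one(1) == 2 and add_one(1) == 2
    assert len(calls) == 1  # second call served from the per-thread cache
    @thread_cached
    async def double(x: int) -> int:
        return x * 2
    assert asyncio.run(double(3)) == 6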
|
import threading
from typing import Callable, ParamSpec, TypeVar
P = ParamSpec("P")
R = TypeVar("R")
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
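if __name__ == "__main__":
    # Small demo (added for illustration; not part of the original module): each thread
    # gets its own cache, so the wrapped function executes once per thread even for
    # identical arguments.
    calls = []
    @thread_cached
    def tracked(x):
        calls.append(threading.get_ident())
        return x
    def worker():
        tracked(1)
        tracked(1)
    threads = [threading.Thread(target=worker) for _ in range(2)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(len(calls))  # 2: one real call per thread, the rest are cache hits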
|
import pytest
from datasets.exceptions import DatasetNotFoundError
from datasets.inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("rajpurkar/squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data):
info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token)
assert list(info.splits.keys()) == ["train"]
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
# non-existing, gated, private:
("hf-internal-testing/non-existing-dataset", "default", DatasetNotFoundError),
("hf-internal-testing/gated_dataset_with_data_files", "default", DatasetNotFoundError),
("hf-internal-testing/private_dataset_with_data_files", "default", DatasetNotFoundError),
("hf-internal-testing/gated_dataset_with_data_files", "default", DatasetNotFoundError),
("hf-internal-testing/private_dataset_with_data_files", "default", DatasetNotFoundError),
],
)
def test_get_dataset_config_info_raises(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("amirveyseh/acronym_identification", ["default"]),
("rajpurkar/squad", ["plain_text"]),
("dalle-mini/wit", ["default"]),
("hf-internal-testing/librispeech_asr_dummy", ["clean"]),
("hf-internal-testing/audiofolder_no_configs_in_metadata", ["default"]),
("hf-internal-testing/audiofolder_single_config_in_metadata", ["custom"]),
("hf-internal-testing/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert config_names == expected
@pytest.mark.parametrize(
"path, expected",
[
("amirveyseh/acronym_identification", "default"),
("rajpurkar/squad", "plain_text"),
("dalle-mini/wit", "default"),
("hf-internal-testing/librispeech_asr_dummy", "clean"),
("hf-internal-testing/audiofolder_no_configs_in_metadata", "default"),
("hf-internal-testing/audiofolder_single_config_in_metadata", "custom"),
("hf-internal-testing/audiofolder_two_configs_in_metadata", None),
],
)
def test_get_dataset_default_config_name(path, expected):
default_config_name = get_dataset_default_config_name(path)
if expected:
assert default_config_name == expected
else:
assert default_config_name is None
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("rajpurkar/squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["default"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("rajpurkar/squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import pytest
from datasets.exceptions import DatasetNotFoundError
from datasets.inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("rajpurkar/squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data):
info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token)
assert list(info.splits.keys()) == ["train"]
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
# non-existing, gated, private:
("hf-internal-testing/non-existing-dataset", "default", DatasetNotFoundError),
("hf-internal-testing/gated_dataset_with_data_files", "default", DatasetNotFoundError),
("hf-internal-testing/private_dataset_with_data_files", "default", DatasetNotFoundError),
("hf-internal-testing/gated_dataset_with_data_files", "default", DatasetNotFoundError),
("hf-internal-testing/private_dataset_with_data_files", "default", DatasetNotFoundError),
],
)
def test_get_dataset_config_info_raises(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("acronym_identification", ["default"]),
("rajpurkar/squad", ["plain_text"]),
("dalle-mini/wit", ["default"]),
("hf-internal-testing/librispeech_asr_dummy", ["clean"]),
("hf-internal-testing/audiofolder_no_configs_in_metadata", ["default"]),
("hf-internal-testing/audiofolder_single_config_in_metadata", ["custom"]),
("hf-internal-testing/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert config_names == expected
@pytest.mark.parametrize(
"path, expected",
[
("acronym_identification", "default"),
("rajpurkar/squad", "plain_text"),
("dalle-mini/wit", "default"),
("hf-internal-testing/librispeech_asr_dummy", "clean"),
("hf-internal-testing/audiofolder_no_configs_in_metadata", "default"),
("hf-internal-testing/audiofolder_single_config_in_metadata", "custom"),
("hf-internal-testing/audiofolder_two_configs_in_metadata", None),
],
)
def test_get_dataset_default_config_name(path, expected):
default_config_name = get_dataset_default_config_name(path)
if expected:
assert default_config_name == expected
else:
assert default_config_name is None
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("rajpurkar/squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["default"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("rajpurkar/squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import codecs
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class TextDecoderBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="A string containing escaped characters to be decoded",
placeholder='Your entire text block with \\n and \\" escaped characters',
)
class Output(BlockSchema):
decoded_text: str = SchemaField(
description="The decoded text with escape sequences processed"
)
def __init__(self):
super().__init__(
id="2570e8fe-8447-43ed-84c7-70d657923231",
description="Decodes a string containing escape sequences into actual text",
categories={BlockCategory.TEXT},
input_schema=TextDecoderBlock.Input,
output_schema=TextDecoderBlock.Output,
test_input={"text": """Hello\nWorld!\nThis is a \"quoted\" string."""},
test_output=[
(
"decoded_text",
"""Hello
World!
This is a "quoted" string.""",
)
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
decoded_text = codecs.decode(input_data.text, "unicode_escape")
yield "decoded_text", decoded_text
|
import codecs
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class TextDecoderBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(
description="A string containing escaped characters to be decoded",
placeholder='Your entire text block with \\n and \\" escaped characters',
)
class Output(BlockSchema):
decoded_text: str = SchemaField(
description="The decoded text with escape sequences processed"
)
def __init__(self):
super().__init__(
id="2570e8fe-8447-43ed-84c7-70d657923231",
description="Decodes a string containing escape sequences into actual text",
categories={BlockCategory.TEXT},
input_schema=TextDecoderBlock.Input,
output_schema=TextDecoderBlock.Output,
test_input={"text": """Hello\nWorld!\nThis is a \"quoted\" string."""},
test_output=[
(
"decoded_text",
"""Hello
World!
This is a "quoted" string.""",
)
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
decoded_text = codecs.decode(input_data.text, "unicode_escape")
yield "decoded_text", decoded_text
|
"""**Schemas** are the LangChain Base Classes and Interfaces."""
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.caches import BaseCache
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.documents import BaseDocumentTransformer, Document
from langchain_core.exceptions import LangChainException, OutputParserException
from langchain_core.memory import BaseMemory
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
_message_from_dict,
get_buffer_string,
messages_from_dict,
messages_to_dict,
)
from langchain_core.messages.base import message_to_dict
from langchain_core.output_parsers import (
BaseLLMOutputParser,
BaseOutputParser,
StrOutputParser,
)
from langchain_core.outputs import (
ChatGeneration,
ChatResult,
Generation,
LLMResult,
RunInfo,
)
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts import BasePromptTemplate, format_document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore
RUN_KEY = "__run"
# Backwards compatibility.
Memory = BaseMemory
_message_to_dict = message_to_dict
__all__ = [
"RUN_KEY",
"AIMessage",
"AgentAction",
"AgentFinish",
"BaseCache",
"BaseChatMessageHistory",
"BaseDocumentTransformer",
"BaseLLMOutputParser",
"BaseMemory",
"BaseMessage",
"BaseOutputParser",
"BasePromptTemplate",
"BaseRetriever",
"BaseStore",
"ChatGeneration",
"ChatMessage",
"ChatResult",
"Document",
"FunctionMessage",
"Generation",
"HumanMessage",
"LLMResult",
"LangChainException",
"Memory",
"OutputParserException",
"PromptValue",
"RunInfo",
"StrOutputParser",
"SystemMessage",
"_message_from_dict",
"_message_to_dict",
"format_document",
"get_buffer_string",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
]
|
"""**Schemas** are the LangChain Base Classes and Interfaces."""
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.caches import BaseCache
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.documents import BaseDocumentTransformer, Document
from langchain_core.exceptions import LangChainException, OutputParserException
from langchain_core.memory import BaseMemory
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
_message_from_dict,
get_buffer_string,
messages_from_dict,
messages_to_dict,
)
from langchain_core.messages.base import message_to_dict
from langchain_core.output_parsers import (
BaseLLMOutputParser,
BaseOutputParser,
StrOutputParser,
)
from langchain_core.outputs import (
ChatGeneration,
ChatResult,
Generation,
LLMResult,
RunInfo,
)
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts import BasePromptTemplate, format_document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore
RUN_KEY = "__run"
# Backwards compatibility.
Memory = BaseMemory
_message_to_dict = message_to_dict
__all__ = [
"BaseCache",
"BaseMemory",
"BaseStore",
"AgentFinish",
"AgentAction",
"Document",
"BaseChatMessageHistory",
"BaseDocumentTransformer",
"BaseMessage",
"ChatMessage",
"FunctionMessage",
"HumanMessage",
"AIMessage",
"SystemMessage",
"messages_from_dict",
"messages_to_dict",
"message_to_dict",
"_message_to_dict",
"_message_from_dict",
"get_buffer_string",
"RunInfo",
"LLMResult",
"ChatResult",
"ChatGeneration",
"Generation",
"PromptValue",
"LangChainException",
"BaseRetriever",
"RUN_KEY",
"Memory",
"OutputParserException",
"StrOutputParser",
"BaseOutputParser",
"BaseLLMOutputParser",
"BasePromptTemplate",
"format_document",
]
|
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers.openai_tools import (
parse_tool_call,
)
from langchain_community.chat_models.tongyi import (
convert_dict_to_message,
convert_message_to_dict,
)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_function_call() -> None:
raw_function_calls = [
{
"function": {
"name": "get_current_weather",
"arguments": '{"location": "Boston", "unit": "fahrenheit"}',
},
"type": "function",
}
]
message_dict = {
"role": "assistant",
"content": "foo",
"tool_calls": raw_function_calls,
}
result = convert_dict_to_message(message_dict)
tool_calls = [
parse_tool_call(raw_tool_call, return_id=True)
for raw_tool_call in raw_function_calls
]
expected_output = AIMessage(
content="foo",
additional_kwargs={"tool_calls": raw_function_calls},
tool_calls=tool_calls, # type: ignore[arg-type]
invalid_tool_calls=[],
)
assert result == expected_output
def test__convert_dict_to_message_partial_mode() -> None:
message_dict = {"role": "assistant", "content": "foo", "partial": True}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo", additional_kwargs={"partial": True})
assert result == expected_output
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai_partial_mode() -> None:
message = AIMessage(content="foo", additional_kwargs={"partial": True})
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo", "partial": True}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_tool() -> None:
message = FunctionMessage(name="foo", content="bar")
result = convert_message_to_dict(message)
expected_output = {
"role": "tool",
"tool_call_id": "",
"content": "bar",
"name": "foo",
}
assert result == expected_output
|
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers.openai_tools import (
parse_tool_call,
)
from langchain_community.chat_models.tongyi import (
convert_dict_to_message,
convert_message_to_dict,
)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_function_call() -> None:
raw_function_calls = [
{
"function": {
"name": "get_current_weather",
"arguments": '{"location": "Boston", "unit": "fahrenheit"}',
},
"type": "function",
}
]
message_dict = {
"role": "assistant",
"content": "foo",
"tool_calls": raw_function_calls,
}
result = convert_dict_to_message(message_dict)
tool_calls = [
parse_tool_call(raw_tool_call, return_id=True)
for raw_tool_call in raw_function_calls
]
expected_output = AIMessage(
content="foo",
additional_kwargs={"tool_calls": raw_function_calls},
tool_calls=tool_calls, # type: ignore[arg-type]
invalid_tool_calls=[],
)
assert result == expected_output
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_tool() -> None:
message = FunctionMessage(name="foo", content="bar")
result = convert_message_to_dict(message)
expected_output = {
"role": "tool",
"tool_call_id": "",
"content": "bar",
"name": "foo",
}
assert result == expected_output
|
from .text_paddle import TextPaddleEncoder
|
from .text_paddle import TextPaddleEncoder
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.memorize.tool import Memorize, TrainableLLM
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TrainableLLM": "langchain_community.tools.memorize.tool",
"Memorize": "langchain_community.tools.memorize.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Memorize",
"TrainableLLM",
]
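# Illustrative usage sketch (not part of the original module); assumes this shim
# is the langchain.tools.memorize package, which is how create_importer-based
# shims are typically laid out.
# from langchain.tools.memorize import Memorize   # resolves via __getattr__ above,
#                                                 # emitting a deprecation warning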
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.memorize.tool import Memorize, TrainableLLM
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TrainableLLM": "langchain_community.tools.memorize.tool",
"Memorize": "langchain_community.tools.memorize.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TrainableLLM",
"Memorize",
]
|
import pytest
from langchain.evaluation.string_distance import (
PairwiseStringDistanceEvalChain,
StringDistance,
StringDistanceEvalChain,
)
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
def test_zero_distance(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = eval_chain.evaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
@pytest.mark.parametrize("normalize_score", [True, False])
def test_zero_distance_pairwise(
*, distance: StringDistance, normalize_score: bool
) -> None:
eval_chain = PairwiseStringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
string = "三人行则必有我师"
result = eval_chain.evaluate_string_pairs(prediction=string, prediction_b=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_string_pairs(
prediction=string, prediction_b=string
)
assert "score" in result
assert result["score"] == 0
valid_distances = [
distance for distance in StringDistance if distance != StringDistance.HAMMING
]
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
@pytest.mark.parametrize("normalize_score", [True, False])
def test_non_zero_distance(*, distance: StringDistance, normalize_score: bool) -> None:
eval_chain = StringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_strings(prediction=prediction, reference=reference)
assert "score" in result
assert result["score"] > 0
if normalize_score:
assert result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_strings(
prediction=prediction, reference=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
def test_non_zero_distance_pairwise(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
|
import pytest
from langchain.evaluation.string_distance import (
PairwiseStringDistanceEvalChain,
StringDistance,
StringDistanceEvalChain,
)
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
def test_zero_distance(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = eval_chain.evaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
@pytest.mark.parametrize("normalize_score", [True, False])
def test_zero_distance_pairwise(
distance: StringDistance, normalize_score: bool
) -> None:
eval_chain = PairwiseStringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
string = "三人行则必有我师"
result = eval_chain.evaluate_string_pairs(prediction=string, prediction_b=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_string_pairs(
prediction=string, prediction_b=string
)
assert "score" in result
assert result["score"] == 0
valid_distances = [
distance for distance in StringDistance if distance != StringDistance.HAMMING
]
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
@pytest.mark.parametrize("normalize_score", [True, False])
def test_non_zero_distance(distance: StringDistance, normalize_score: bool) -> None:
eval_chain = StringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_strings(prediction=prediction, reference=reference)
assert "score" in result
assert result["score"] > 0
if normalize_score:
assert result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_strings(
prediction=prediction, reference=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
def test_non_zero_distance_pairwise(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
|
from typing import Optional
from docarray import DocList, BaseDoc
from docarray.typing import NdArray
from jina import Executor, requests
import numpy as np
class MyDoc(BaseDoc):
text: str
embedding: Optional[NdArray] = None
class Encoder(Executor):
def __init__(
self,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
@requests
def encode(self, docs: DocList[MyDoc], **kwargs) -> DocList[MyDoc]:
for doc in docs:
doc.embedding = np.random.random(128)
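# Illustrative standalone usage sketch (not part of the original file); assumes the
# Executor can be instantiated directly for a quick local check, outside a Flow.
# encoder = Encoder()
# docs = DocList[MyDoc]([MyDoc(text="hello")])
# encoder.encode(docs)
# assert docs[0].embedding.shape == (128,)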
|
from typing import Optional
from docarray import DocList, BaseDoc
from docarray.typing import NdArray
from jina import Executor, requests
import numpy as np
class MyDoc(BaseDoc):
text: str
embedding: Optional[NdArray] = None
class Encoder(Executor):
def __init__(
self,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
@requests
def encode(self, docs: DocList[MyDoc], **kwargs) -> DocList[MyDoc]:
for doc in docs:
doc.embedding = np.random.random(128)
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# use ResNeSt img_norm
img_norm_cfg = dict(
mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.0.0.dev0",
author="Nils Reimers",
author_email="[email protected]",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
"datasets",
"accelerate>=0.20.3",
],
extras_require={
"dev": [
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.0.0.dev0",
author="Nils Reimers",
author_email="[email protected]",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
"datasets",
"accelerate>=0.20.3",
],
extras_require={
"dev": [
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.optimizers import legacy as legacy
from keras.optimizers import schedules as schedules
from keras.src.optimizers import deserialize as deserialize
from keras.src.optimizers import get as get
from keras.src.optimizers import serialize as serialize
from keras.src.optimizers.adadelta import Adadelta as Adadelta
from keras.src.optimizers.adafactor import Adafactor as Adafactor
from keras.src.optimizers.adagrad import Adagrad as Adagrad
from keras.src.optimizers.adam import Adam as Adam
from keras.src.optimizers.adamax import Adamax as Adamax
from keras.src.optimizers.adamw import AdamW as AdamW
from keras.src.optimizers.ftrl import Ftrl as Ftrl
from keras.src.optimizers.lamb import Lamb as Lamb
from keras.src.optimizers.lion import Lion as Lion
from keras.src.optimizers.loss_scale_optimizer import (
LossScaleOptimizer as LossScaleOptimizer,
)
from keras.src.optimizers.muon import Muon as Muon
from keras.src.optimizers.nadam import Nadam as Nadam
from keras.src.optimizers.optimizer import Optimizer as Optimizer
from keras.src.optimizers.rmsprop import RMSprop as RMSprop
from keras.src.optimizers.sgd import SGD as SGD
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.optimizers import legacy
from keras.api.optimizers import schedules
from keras.src.optimizers import deserialize
from keras.src.optimizers import get
from keras.src.optimizers import serialize
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lamb import Lamb
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.muon import Muon
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
|
"""**Load** module helps with serialization and deserialization."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import load, loads
from langchain_core.load.serializable import Serializable
__all__ = ["dumpd", "dumps", "load", "loads", "Serializable"]
_dynamic_imports = {
"dumpd": "dump",
"dumps": "dump",
"load": "load",
"loads": "load",
"Serializable": "serializable",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
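# Illustrative round-trip sketch (not part of the original module); assumes some
# Serializable object (e.g. a chat prompt template) is available at runtime.
# from langchain_core.prompts import ChatPromptTemplate
# prompt = ChatPromptTemplate.from_messages([("human", "{question}")])
# text = dumps(prompt)      # serialized JSON string, resolved lazily above
# restored = loads(text)    # reconstructs an equivalent Serializable object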
|
"""**Load** module helps with serialization and deserialization."""
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import load, loads
from langchain_core.load.serializable import Serializable
__all__ = ["dumpd", "dumps", "load", "loads", "Serializable"]
|
from urllib.parse import urlparse
from backend.blocks.github._auth import GithubCredentials
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
api_base = f"https://api.github.com/repos/{owner}/{repo}"
if len(path_parts) > 2:
additional_path = "/".join(path_parts[2:])
api_url = f"{api_base}/{additional_path}"
else:
# Repository base URL
api_url = api_base
else:
raise ValueError("Invalid GitHub URL format.")
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.auth_header(),
"Accept": "application/vnd.github.v3+json",
}
def get_api(credentials: GithubCredentials, convert_urls: bool = True) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url if convert_urls else None,
extra_headers=_get_headers(credentials),
)
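# Illustrative check of the URL conversion above (not part of the original file);
# the repository paths used here are hypothetical.
if __name__ == "__main__":
    assert (
        _convert_to_api_url("https://github.com/octocat/hello-world/issues/42")
        == "https://api.github.com/repos/octocat/hello-world/issues/42"
    )
    assert (
        _convert_to_api_url("https://github.com/octocat/hello-world")
        == "https://api.github.com/repos/octocat/hello-world"
    )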
|
from urllib.parse import urlparse
from backend.blocks.github._auth import GithubCredentials
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
api_base = f"https://api.github.com/repos/{owner}/{repo}"
if len(path_parts) > 2:
additional_path = "/".join(path_parts[2:])
api_url = f"{api_base}/{additional_path}"
else:
# Repository base URL
api_url = api_base
else:
raise ValueError("Invalid GitHub URL format.")
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}
def get_api(credentials: GithubCredentials, convert_urls: bool = True) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url if convert_urls else None,
extra_headers=_get_headers(credentials),
)
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs)
train_dataloader = dict(dataset=dict(times=2))
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs)
data = dict(train=dict(times=2))
|
from enum import Enum
from typing import TYPE_CHECKING, Union, overload
import numpy as np
if TYPE_CHECKING:
import torch # pants: no-infer-dep
class Pooling(str, Enum):
"""Enum of possible pooling choices with pooling behaviors."""
CLS = "cls"
MEAN = "mean"
def __call__(self, array: np.ndarray) -> np.ndarray:
if self == self.CLS:
return Pooling.cls_pooling(array)
return Pooling.mean_pooling(array)
@classmethod
@overload
def cls_pooling(cls, array: np.ndarray) -> np.ndarray: ...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def cls_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def cls_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, 0]
if len(array.shape) == 2:
return array[0]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
def mean_pooling(cls, array: np.ndarray) -> np.ndarray:
if len(array.shape) == 3:
return array.mean(axis=1)
if len(array.shape) == 2:
return array.mean(axis=0)
raise NotImplementedError(f"Unhandled shape {array.shape}.")
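# Illustrative check (not part of the original module): enum members are callable
# via __call__ above and dispatch to the matching pooling strategy.
if __name__ == "__main__":
    token_embeddings = np.random.rand(2, 4, 8)  # (batch, seq_len, hidden)
    assert Pooling.MEAN(token_embeddings).shape == (2, 8)
    assert Pooling.CLS(token_embeddings).shape == (2, 8)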
|
from enum import Enum
from typing import TYPE_CHECKING, Union, overload
import numpy as np
if TYPE_CHECKING:
import torch # pants: no-infer-dep
class Pooling(str, Enum):
"""Enum of possible pooling choices with pooling behaviors."""
CLS = "cls"
MEAN = "mean"
def __call__(self, array: np.ndarray) -> np.ndarray:
if self == self.CLS:
return Pooling.cls_pooling(array)
return Pooling.mean_pooling(array)
@classmethod
@overload
def cls_pooling(cls, array: np.ndarray) -> np.ndarray:
...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def cls_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def cls_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, 0]
if len(array.shape) == 2:
return array[0]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
def mean_pooling(cls, array: np.ndarray) -> np.ndarray:
if len(array.shape) == 3:
return array.mean(axis=1)
if len(array.shape) == 2:
return array.mean(axis=0)
raise NotImplementedError(f"Unhandled shape {array.shape}.")
|
from __future__ import annotations
import random
import pytest
import torch
from torch.utils.data import ConcatDataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture
def dummy_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_duplicates_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor_1", "anchor_1", "anchor_1", ... "anchor_2", "anchor_2"],
"positive": ["positive_1", "positive_1", "positive_1", ... "positive_2", "positive_2"],
}
"""
values = [{"anchor": "anchor_1", "positive": "positive_1"}] * 10 + [
{"anchor": "anchor_2", "positive": "positive_2"}
] * 8
return Dataset.from_list(values)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
@pytest.mark.parametrize("drop_last", [True, False])
def test_proportional_no_duplicates(dummy_duplicates_dataset: Dataset, drop_last: bool) -> None:
batch_size = 2
sampler_1 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["anchor"]
)
sampler_2 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["positive"]
)
concat_dataset = ConcatDataset([dummy_duplicates_dataset, dummy_duplicates_dataset])
batch_sampler = ProportionalBatchSampler(
concat_dataset, [sampler_1, sampler_2], generator=torch.Generator(), seed=12
)
batches = list(iter(batch_sampler))
if drop_last:
# If we drop the last batch (i.e. incomplete batches), we should have 16 batches out of the 18 possible,
# because of the duplicates being skipped by the NoDuplicatesBatchSampler.
# Notably, we should not crash like reported in #2816.
assert len(batches) == 16
# All batches are the same size: 2
assert all(len(batch) == batch_size for batch in batches)
assert len(sum(batches, [])) == 32
else:
# If we don't drop incomplete batches, we should be able to do 18 batches, and get more data.
# Note: we don't get all data, because the NoDuplicatesBatchSampler will estimate the number of batches
# and it would require more (non-complete) batches to get all data.
assert len(batches) == 18
assert len(sum(batches, [])) == 34
|
from __future__ import annotations
import random
import pytest
import torch
from datasets import Dataset
from torch.utils.data import ConcatDataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler
@pytest.fixture
def dummy_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_duplicates_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor_1", "anchor_1", "anchor_1", ... "anchor_2", "anchor_2"],
"positive": ["positive_1", "positive_1", "positive_1", ... "positive_2", "positive_2"],
}
"""
values = [{"anchor": "anchor_1", "positive": "positive_1"}] * 10 + [
{"anchor": "anchor_2", "positive": "positive_2"}
] * 8
return Dataset.from_list(values)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
@pytest.mark.parametrize("drop_last", [True, False])
def test_proportional_no_duplicates(dummy_duplicates_dataset: Dataset, drop_last: bool) -> None:
batch_size = 2
sampler_1 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["anchor"]
)
sampler_2 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["positive"]
)
concat_dataset = ConcatDataset([dummy_duplicates_dataset, dummy_duplicates_dataset])
batch_sampler = ProportionalBatchSampler(
concat_dataset, [sampler_1, sampler_2], generator=torch.Generator(), seed=12
)
batches = list(iter(batch_sampler))
if drop_last:
# If we drop the last batch (i.e. incomplete batches), we should have 16 batches out of the 18 possible,
# because of the duplicates being skipped by the NoDuplicatesBatchSampler.
# Notably, we should not crash like reported in #2816.
assert len(batches) == 16
# All batches are the same size: 2
assert all(len(batch) == batch_size for batch in batches)
assert len(sum(batches, [])) == 32
else:
# If we don't drop incomplete batches, we should be able to do 18 batches, and get more data.
# Note: we don't get all data, because the NoDuplicatesBatchSampler will estimate the number of batches
# and it would require more (non-complete) batches to get all data.
assert len(batches) == 18
assert len(sum(batches, [])) == 34
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBoxes
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_boxes = BoundingBoxes(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_boxes": bounding_boxes,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
        # The order of the image files in the .zip archives perfectly matches the order of the entries in the
        # (possibly concatenated) .csv files, so we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
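# Illustrative usage sketch (not part of the original file); assumes the GTSRB
# archives can be downloaded into a local root directory.
# dataset = GTSRB("~/datasets", split="test")
# sample = next(iter(dataset))
# sample.keys()  # -> path, image, label, bounding_boxes (see _prepare_sample above)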
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBox
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_box = BoundingBox(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_box": bounding_box,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
        # The order of the image files in the .zip archives perfectly matches the order of the entries in the
        # (possibly concatenated) .csv files, so we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
import json
from typing import Optional
from cryptography.fernet import Fernet
from backend.util.settings import Settings
ENCRYPTION_KEY = Settings().secrets.encryption_key
class JSONCryptor:
def __init__(self, key: Optional[str] = None):
# Use provided key or get from environment
self.key = key or ENCRYPTION_KEY
if not self.key:
raise ValueError(
"Encryption key must be provided or set in ENCRYPTION_KEY environment variable"
)
self.fernet = Fernet(
self.key.encode() if isinstance(self.key, str) else self.key
)
def encrypt(self, data: dict) -> str:
"""Encrypt dictionary data to string"""
json_str = json.dumps(data)
encrypted = self.fernet.encrypt(json_str.encode())
return encrypted.decode()
def decrypt(self, encrypted_str: str) -> dict:
"""Decrypt string to dictionary"""
if not encrypted_str:
return {}
try:
decrypted = self.fernet.decrypt(encrypted_str.encode())
return json.loads(decrypted.decode())
except Exception:
return {}
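# Illustrative round-trip sketch (not part of the original module); assumes a
# throwaway Fernet key is generated for the demo instead of the settings key.
# demo_key = Fernet.generate_key().decode()
# cryptor = JSONCryptor(key=demo_key)
# token = cryptor.encrypt({"user": "alice"})
# assert cryptor.decrypt(token) == {"user": "alice"}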
|
import json
from typing import Optional
from cryptography.fernet import Fernet
from backend.util.settings import Settings
ENCRYPTION_KEY = Settings().secrets.encryption_key
class JSONCryptor:
def __init__(self, key: Optional[str] = None):
# Use provided key or get from environment
self.key = key or ENCRYPTION_KEY
if not self.key:
raise ValueError(
"Encryption key must be provided or set in ENCRYPTION_KEY environment variable"
)
self.fernet = Fernet(
self.key.encode() if isinstance(self.key, str) else self.key
)
def encrypt(self, data: dict) -> str:
"""Encrypt dictionary data to string"""
json_str = json.dumps(data)
encrypted = self.fernet.encrypt(json_str.encode())
return encrypted.decode()
def decrypt(self, encrypted_str: str) -> dict:
"""Decrypt string to dictionary"""
if not encrypted_str:
return {}
decrypted = self.fernet.decrypt(encrypted_str.encode())
return json.loads(decrypted.decode())
|
"""Memory used to save agent output AND intermediate steps."""
from typing import Any
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain.agents.format_scratchpad import (
format_to_openai_function_messages,
format_to_tool_messages,
)
from langchain.memory.chat_memory import BaseChatMemory
class AgentTokenBufferMemory(BaseChatMemory):
"""Memory used to save agent output AND intermediate steps.
Parameters:
human_prefix: Prefix for human messages. Default is "Human".
ai_prefix: Prefix for AI messages. Default is "AI".
llm: Language model.
memory_key: Key to save memory under. Default is "history".
max_token_limit: Maximum number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest
messages will be pruned. Default is 12000.
return_messages: Whether to return messages. Default is True.
output_key: Key to save output under. Default is "output".
intermediate_steps_key: Key to save intermediate steps under.
Default is "intermediate_steps".
format_as_tools: Whether to format as tools. Default is False.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 12000
"""The max number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest messages will be pruned."""
return_messages: bool = True
output_key: str = "output"
intermediate_steps_key: str = "intermediate_steps"
format_as_tools: bool = False
@property
def buffer(self) -> list[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> list[str]:
"""Always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer.
Args:
inputs: Inputs to the agent.
Returns:
A dictionary with the history buffer.
"""
if self.return_messages:
final_buffer: Any = self.buffer
else:
final_buffer = get_buffer_string(
self.buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
def save_context(self, inputs: dict[str, Any], outputs: dict[str, Any]) -> None:
"""Save context from this conversation to buffer. Pruned.
Args:
inputs: Inputs to the agent.
outputs: Outputs from the agent.
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str)
format_to_messages = (
format_to_tool_messages
if self.format_as_tools
else format_to_openai_function_messages
)
steps = format_to_messages(outputs[self.intermediate_steps_key])
for msg in steps:
self.chat_memory.add_message(msg)
self.chat_memory.add_ai_message(output_str)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
while curr_buffer_length > self.max_token_limit:
buffer.pop(0)
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
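# Illustrative usage sketch (not part of the original module); assumes a chat
# model instance `llm` that implements get_num_tokens_from_messages.
# memory = AgentTokenBufferMemory(llm=llm, max_token_limit=4000)
# memory.save_context(
#     {"input": "What is 2 + 2?"},
#     {"output": "4", "intermediate_steps": []},
# )
# memory.load_memory_variables({})  # -> {"history": [<messages>]}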
|
"""Memory used to save agent output AND intermediate steps."""
from typing import Any
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain.agents.format_scratchpad import (
format_to_openai_function_messages,
format_to_tool_messages,
)
from langchain.memory.chat_memory import BaseChatMemory
class AgentTokenBufferMemory(BaseChatMemory): # type: ignore[override]
"""Memory used to save agent output AND intermediate steps.
Parameters:
human_prefix: Prefix for human messages. Default is "Human".
ai_prefix: Prefix for AI messages. Default is "AI".
llm: Language model.
memory_key: Key to save memory under. Default is "history".
max_token_limit: Maximum number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest
messages will be pruned. Default is 12000.
return_messages: Whether to return messages. Default is True.
output_key: Key to save output under. Default is "output".
intermediate_steps_key: Key to save intermediate steps under.
Default is "intermediate_steps".
format_as_tools: Whether to format as tools. Default is False.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 12000
"""The max number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest messages will be pruned."""
return_messages: bool = True
output_key: str = "output"
intermediate_steps_key: str = "intermediate_steps"
format_as_tools: bool = False
@property
def buffer(self) -> list[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> list[str]:
"""Always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer.
Args:
inputs: Inputs to the agent.
Returns:
A dictionary with the history buffer.
"""
if self.return_messages:
final_buffer: Any = self.buffer
else:
final_buffer = get_buffer_string(
self.buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
def save_context(self, inputs: dict[str, Any], outputs: dict[str, Any]) -> None:
"""Save context from this conversation to buffer. Pruned.
Args:
inputs: Inputs to the agent.
outputs: Outputs from the agent.
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str)
format_to_messages = (
format_to_tool_messages
if self.format_as_tools
else format_to_openai_function_messages
)
steps = format_to_messages(outputs[self.intermediate_steps_key])
for msg in steps:
self.chat_memory.add_message(msg)
self.chat_memory.add_ai_message(output_str)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
while curr_buffer_length > self.max_token_limit:
buffer.pop(0)
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
|
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
|
from sentence_transformers import SentenceTransformer
import pytest
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmcv.utils import Registry, build_from_cfg
PRIOR_GENERATORS = Registry('Generator for anchors and points')
ANCHOR_GENERATORS = PRIOR_GENERATORS
def build_prior_generator(cfg, default_args=None):
return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
def build_anchor_generator(cfg, default_args=None):
warnings.warn(
        '``build_anchor_generator`` will be deprecated soon, please use '
        '``build_prior_generator`` ')
return build_prior_generator(cfg, default_args=default_args)
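# Illustrative sketch (not part of the original module); builds a standard RPN
# anchor generator config through the registry helper above.
# prior_generator = build_prior_generator(
#     dict(
#         type='AnchorGenerator',
#         scales=[8],
#         ratios=[0.5, 1.0, 2.0],
#         strides=[4, 8, 16, 32, 64]))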
|
import warnings
from mmcv.utils import Registry, build_from_cfg
PRIOR_GENERATORS = Registry('Generator for anchors and points')
ANCHOR_GENERATORS = PRIOR_GENERATORS
def build_prior_generator(cfg, default_args=None):
return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
def build_anchor_generator(cfg, default_args=None):
warnings.warn(
        '``build_anchor_generator`` will be deprecated soon, please use '
        '``build_prior_generator`` ')
return build_prior_generator(cfg, default_args=default_args)
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=3)))
classes = ('person', 'bicycle', 'car')
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
|
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=3)))
classes = ('person', 'bicycle', 'car')
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 as DenseNet201
from keras.src.applications.densenet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.densenet import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.densenet import DenseNet121
from keras.src.applications.densenet import DenseNet169
from keras.src.applications.densenet import DenseNet201
from keras.src.applications.densenet import decode_predictions
from keras.src.applications.densenet import preprocess_input
|
import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
('http', [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_kwargs(
port, protocol, expected_port, expected_protocol
):
args = ArgNamespace.kwargs2namespace(
{'port': port, 'protocol': protocol}, set_gateway_parser()
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
(['http'], [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_args_list(
port, protocol, expected_port, expected_protocol
):
args = set_gateway_parser().parse_args(
['--port'] + port + ['--protocol'] + protocol
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
def test_pod_port_cast(port, expected_port):
args = set_pod_parser().parse_args(['--port'] + port)
assert args.port == expected_port
def test_pod_port_default():
args = set_pod_parser().parse_args([])
assert isinstance(args.port, list)
assert len(args.port) == 1
@pytest.mark.parametrize(
'host,expected_host',
[
(['localhost'], ['localhost']),
(['0.0.0.0,localhost'], ['0.0.0.0', 'localhost']),
(['0.0.0.0,localhost', '127.0.0.1'], ['0.0.0.0', 'localhost', '127.0.0.1']),
],
)
def test_pod_host_cast(host, expected_host):
args = set_pod_parser().parse_args(['--host'] + host)
assert args.host == expected_host
def test_pod_host_default():
from jina.constants import __default_host__
args = set_pod_parser().parse_args([])
assert args.host == [__default_host__]
def test_default_port_protocol_gateway():
args = set_gateway_parser().parse_args([])
assert args.port is None
assert args.protocol == [GatewayProtocolType.GRPC]
def test_get_non_defaults_args():
args = set_gateway_parser().parse_args(
[
'--port',
'12345',
'12344',
'--protocol',
'grpc',
'--uses',
'MyCustomGateway',
'--uses-with',
'{"arg":"value"}',
]
)
non_defaults = ArgNamespace.get_non_defaults_args(
args,
set_gateway_parser(),
)
assert non_defaults['port'] == [12345, 12344]
assert 'protocol' not in non_defaults
assert non_defaults['uses'] == 'MyCustomGateway'
assert non_defaults['uses_with'] == {'arg': 'value'}
|
import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
('http', [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_kwargs(
port, protocol, expected_port, expected_protocol
):
args = ArgNamespace.kwargs2namespace(
{'port': port, 'protocol': protocol}, set_gateway_parser()
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
(['http'], [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_args_list(
port, protocol, expected_port, expected_protocol
):
args = set_gateway_parser().parse_args(
['--port'] + port + ['--protocol'] + protocol
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
def test_pod_port_cast(port, expected_port):
args = set_pod_parser().parse_args(['--port'] + port)
assert args.port == expected_port
def test_pod_port_default():
args = set_pod_parser().parse_args([])
assert isinstance(args.port, list)
assert len(args.port) == 1
@pytest.mark.parametrize(
'host,expected_host',
[
(['localhost'], ['localhost']),
(['0.0.0.0,localhost'], ['0.0.0.0', 'localhost']),
(['0.0.0.0,localhost', '127.0.0.1'], ['0.0.0.0', 'localhost', '127.0.0.1']),
],
)
def test_pod_host_cast(host, expected_host):
args = set_pod_parser().parse_args(['--host'] + host)
assert args.host == expected_host
def test_pod_host_default():
from jina import __default_host__
args = set_pod_parser().parse_args([])
assert args.host == [__default_host__]
def test_default_port_protocol_gateway():
args = set_gateway_parser().parse_args([])
assert args.port is None
assert args.protocol == [GatewayProtocolType.GRPC]
def test_get_non_defaults_args():
args = set_gateway_parser().parse_args(
[
'--port',
'12345',
'12344',
'--protocol',
'grpc',
'--uses',
'MyCustomGateway',
'--uses-with',
'{"arg":"value"}',
]
)
non_defaults = ArgNamespace.get_non_defaults_args(
args,
set_gateway_parser(),
)
assert non_defaults['port'] == [12345, 12344]
assert 'protocol' not in non_defaults
assert non_defaults['uses'] == 'MyCustomGateway'
assert non_defaults['uses_with'] == {'arg': 'value'}
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
# Copyright 2019 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit runner configuration."""
import os
import sys
import tempfile
import lit.formats
# copybara:uncomment_begin(google-only)
# from xla.lit_google_cfg import ENV_FLAGS as google_env_flags
# copybara:uncomment_end
# pylint: disable=undefined-variable
extra_env_flags = []
# copybara:uncomment_begin(google-only)
# extra_env_flags += google_env_flags
# copybara:uncomment_end
config.name = "XLA"
config.suffixes = [".cc", ".hlo", ".json", ".mlir", ".pbtxt", ".py", ".ll"]
config.test_format = lit.formats.ShTest(execute_external=True)
for env in [
# Passthrough XLA_FLAGS.
"XLA_FLAGS",
# Propagate environment variables used by 'bazel coverage'.
# These are exported by tools/coverage/collect_coverage.sh
"BULK_COVERAGE_RUN",
"COVERAGE",
"COVERAGE_DIR",
"COVERAGE_MANIFEST",
"LLVM_PROFILE_FILE",
"LLVM_COVERAGE_FILE",
"GCOV_PREFIX",
"GCOV_PREFIX_STRIP",
] + extra_env_flags:
value = os.environ.get(env)
if value:
config.environment[env] = value
# Use the most preferred temp directory.
config.test_exec_root = (
os.environ.get("TEST_UNDECLARED_OUTPUTS_DIR")
or os.environ.get("TEST_TMPDIR")
or os.path.join(tempfile.gettempdir(), "lit")
)
config.substitutions.extend([
("%PYTHON", os.getenv("PYTHON", sys.executable) or ""),
])
if lit_config.params.get("PTX") == "GCN":
config.available_features.add("IS_ROCM")
# Include additional substitutions that may be defined via params
config.substitutions.extend(
("%%{%s}" % key, val) for key, val in lit_config.params.items()
)
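# Usage sketch (illustrative): lit params such as PTX, and the extra
# substitutions built above, come from the command line via -D/--param, e.g.
#     llvm-lit -v -D PTX=GCN xla/some/test/dir
# which adds the IS_ROCM feature; any `-D key=value` is also exposed to the
# test files as a `%{key}` substitution by the loop above.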
|
# Copyright 2019 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit runner configuration."""
import os
import sys
import tempfile
import lit.formats
# copybara:uncomment_begin(google-only)
# from xla.lit_google_cfg import ENV_FLAGS as google_env_flags
# copybara:uncomment_end
# pylint: disable=undefined-variable
extra_env_flags = []
# copybara:uncomment_begin(google-only)
# extra_env_flags += google_env_flags
# copybara:uncomment_end
config.name = "XLA"
config.suffixes = [".cc", ".hlo", ".json", ".mlir", ".pbtxt", ".py"]
config.test_format = lit.formats.ShTest(execute_external=True)
for env in [
# Passthrough XLA_FLAGS.
"XLA_FLAGS",
# Propagate environment variables used by 'bazel coverage'.
# These are exported by tools/coverage/collect_coverage.sh
"BULK_COVERAGE_RUN",
"COVERAGE",
"COVERAGE_DIR",
"COVERAGE_MANIFEST",
"LLVM_PROFILE_FILE",
"LLVM_COVERAGE_FILE",
"GCOV_PREFIX",
"GCOV_PREFIX_STRIP",
] + extra_env_flags:
value = os.environ.get(env)
if value:
config.environment[env] = value
# Use the most preferred temp directory.
config.test_exec_root = (
os.environ.get("TEST_UNDECLARED_OUTPUTS_DIR")
or os.environ.get("TEST_TMPDIR")
or os.path.join(tempfile.gettempdir(), "lit")
)
config.substitutions.extend([
("%PYTHON", os.getenv("PYTHON", sys.executable) or ""),
])
if lit_config.params.get("PTX") == "GCN":
config.available_features.add("IS_ROCM")
# Include additional substitutions that may be defined via params
config.substitutions.extend(
("%%{%s}" % key, val) for key, val in lit_config.params.items()
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.image import (
DirectoryIterator as DirectoryIterator,
)
from keras.src.legacy.preprocessing.image import (
ImageDataGenerator as ImageDataGenerator,
)
from keras.src.legacy.preprocessing.image import Iterator as Iterator
from keras.src.legacy.preprocessing.image import (
NumpyArrayIterator as NumpyArrayIterator,
)
from keras.src.legacy.preprocessing.image import (
apply_affine_transform as apply_affine_transform,
)
from keras.src.legacy.preprocessing.image import (
apply_brightness_shift as apply_brightness_shift,
)
from keras.src.legacy.preprocessing.image import (
apply_channel_shift as apply_channel_shift,
)
from keras.src.legacy.preprocessing.image import (
random_brightness as random_brightness,
)
from keras.src.legacy.preprocessing.image import (
random_channel_shift as random_channel_shift,
)
from keras.src.legacy.preprocessing.image import (
random_rotation as random_rotation,
)
from keras.src.legacy.preprocessing.image import random_shear as random_shear
from keras.src.legacy.preprocessing.image import random_shift as random_shift
from keras.src.legacy.preprocessing.image import random_zoom as random_zoom
from keras.src.utils.image_utils import array_to_img as array_to_img
from keras.src.utils.image_utils import img_to_array as img_to_array
from keras.src.utils.image_utils import load_img as load_img
from keras.src.utils.image_utils import save_img as save_img
from keras.src.utils.image_utils import smart_resize as smart_resize
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.image import DirectoryIterator
from keras.src.legacy.preprocessing.image import ImageDataGenerator
from keras.src.legacy.preprocessing.image import Iterator
from keras.src.legacy.preprocessing.image import NumpyArrayIterator
from keras.src.legacy.preprocessing.image import apply_affine_transform
from keras.src.legacy.preprocessing.image import apply_brightness_shift
from keras.src.legacy.preprocessing.image import apply_channel_shift
from keras.src.legacy.preprocessing.image import random_brightness
from keras.src.legacy.preprocessing.image import random_channel_shift
from keras.src.legacy.preprocessing.image import random_rotation
from keras.src.legacy.preprocessing.image import random_shear
from keras.src.legacy.preprocessing.image import random_shift
from keras.src.legacy.preprocessing.image import random_zoom
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.image_utils import smart_resize
|
import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytest.mark.parametrize(
"bundle,task,channel,expected_score",
[
[CONVTASNET_BASE_LIBRI2MIX, "speech_separation", 1, 8.1373],
[HDEMUCS_HIGH_MUSDB_PLUS, "music_separation", 2, 8.7480],
[HDEMUCS_HIGH_MUSDB, "music_separation", 2, 8.0697],
],
)
def test_source_separation_models(bundle, task, channel, expected_score, mixture_source, clean_sources):
"""Integration test for the source separation pipeline.
Given the mixture waveform with dimensions `(batch, channel, time)`, the pre-trained pipeline generates
the separated sources Tensor with dimensions `(batch, num_sources, time)`.
The test computes the scale-invariant signal-to-distortion ratio (Si-SDR) score in decibels (dB).
The Si-SDR score should be equal to or larger than the expected score.
"""
model = bundle.get_model()
mixture_waveform, sample_rate = torchaudio.load(mixture_source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms = []
for source in clean_sources:
clean_waveform, sample_rate = torchaudio.load(source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms.append(clean_waveform)
mixture_waveform = mixture_waveform.reshape(1, channel, -1)
estimated_sources = model(mixture_waveform)
clean_waveforms = torch.cat(clean_waveforms).unsqueeze(0)
estimated_sources = estimated_sources.reshape(1, -1, clean_waveforms.shape[-1])
sdr_values = sdr(estimated_sources, clean_waveforms).mean()
assert sdr_values >= expected_score
|
import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
from torchaudio.prototype.pipelines import HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytest.mark.parametrize(
"bundle,task,channel,expected_score",
[
[CONVTASNET_BASE_LIBRI2MIX, "speech_separation", 1, 8.1373],
[HDEMUCS_HIGH_MUSDB_PLUS, "music_separation", 2, 8.7480],
[HDEMUCS_HIGH_MUSDB, "music_separation", 2, 8.0697],
],
)
def test_source_separation_models(bundle, task, channel, expected_score, mixture_source, clean_sources):
"""Integration test for the source separation pipeline.
Given the mixture waveform with dimensions `(batch, channel, time)`, the pre-trained pipeline generates
the separated sources Tensor with dimensions `(batch, num_sources, time)`.
The test computes the scale-invariant signal-to-distortion ratio (Si-SDR) score in decibels (dB).
The Si-SDR score should be equal to or larger than the expected score.
"""
model = bundle.get_model()
mixture_waveform, sample_rate = torchaudio.load(mixture_source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms = []
for source in clean_sources:
clean_waveform, sample_rate = torchaudio.load(source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms.append(clean_waveform)
mixture_waveform = mixture_waveform.reshape(1, channel, -1)
estimated_sources = model(mixture_waveform)
clean_waveforms = torch.cat(clean_waveforms).unsqueeze(0)
estimated_sources = estimated_sources.reshape(1, -1, clean_waveforms.shape[-1])
sdr_values = sdr(estimated_sources, clean_waveforms).mean()
assert sdr_values >= expected_score
|
from urllib.parse import urlparse
from backend.blocks.github._auth import GithubCredentials
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
api_base = f"https://api.github.com/repos/{owner}/{repo}"
if len(path_parts) > 2:
additional_path = "/".join(path_parts[2:])
api_url = f"{api_base}/{additional_path}"
else:
# Repository base URL
api_url = api_base
else:
raise ValueError("Invalid GitHub URL format.")
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}
def get_api(credentials: GithubCredentials, convert_urls: bool = True) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url if convert_urls else None,
extra_headers=_get_headers(credentials),
)
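# --- Illustrative sketch (not part of the original module) ---
# _convert_to_api_url only swaps the host for api.github.com and prefixes
# /repos/<owner>/<repo>, keeping any trailing path segments:
if __name__ == "__main__":
    assert (
        _convert_to_api_url("https://github.com/octocat/Hello-World")
        == "https://api.github.com/repos/octocat/Hello-World"
    )
    assert (
        _convert_to_api_url("https://github.com/octocat/Hello-World/issues/42")
        == "https://api.github.com/repos/octocat/Hello-World/issues/42"
    )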
|
from urllib.parse import urlparse
from backend.blocks.github._auth import GithubCredentials
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
api_base = f"https://api.github.com/repos/{owner}/{repo}"
if len(path_parts) > 2:
additional_path = "/".join(path_parts[2:])
api_url = f"{api_base}/{additional_path}"
else:
# Repository base URL
api_url = api_base
else:
raise ValueError("Invalid GitHub URL format.")
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}
def get_api(credentials: GithubCredentials) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url,
extra_headers=_get_headers(credentials),
)
|
from torchaudio._internal.module_utils import dropping_io_support, dropping_class_io_support
# Initialize extension and backend first
from . import _extension # noqa # usort: skip
from ._backend import ( # noqa # usort: skip
AudioMetaData as _AudioMetaData,
get_audio_backend as _get_audio_backend,
info as _info,
list_audio_backends as _list_audio_backends,
load as _load,
save as _save,
set_audio_backend as _set_audio_backend,
)
AudioMetaData = dropping_class_io_support(_AudioMetaData)
get_audio_backend = dropping_io_support(_get_audio_backend)
info = dropping_io_support(_info)
list_audio_backends = dropping_io_support(_list_audio_backends)
load = dropping_io_support(_load)
save = dropping_io_support(_save)
set_audio_backend = dropping_io_support(_set_audio_backend)
from . import ( # noqa: F401
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
# For BC
from . import backend # noqa # usort: skip
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"AudioMetaData",
"load",
"info",
"save",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
from torchaudio._internal.module_utils import dropping_io_support
# Initialize extension and backend first
from . import _extension # noqa # usort: skip
from ._backend import ( # noqa # usort: skip
AudioMetaData,
get_audio_backend as _get_audio_backend,
info as _info,
list_audio_backends as _list_audio_backends,
load as _load,
save as _save,
set_audio_backend as _set_audio_backend,
)
AudioMetaData.__init__ = dropping_io_support(AudioMetaData.__init__)
get_audio_backend = dropping_io_support(_get_audio_backend)
info = dropping_io_support(_info)
list_audio_backends = dropping_io_support(_list_audio_backends)
load = dropping_io_support(_load)
save = dropping_io_support(_save)
set_audio_backend = dropping_io_support(_set_audio_backend)
from . import ( # noqa: F401
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
# For BC
from . import backend # noqa # usort: skip
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"AudioMetaData",
"load",
"info",
"save",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pycocotools.mask as mask_util
import torch
from mmengine.utils import slice_list
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In the dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor back into its original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = slice_list(polys_single, polys_lens_single)
mask_polys = slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list): bitmap mask results.
Returns:
list | tuple: RLE encoded mask.
"""
encoded_mask_results = []
for mask in mask_results:
encoded_mask_results.append(
mask_util.encode(
np.array(mask[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
return encoded_mask_results
def mask2bbox(masks):
"""Obtain tight bounding boxes of binary masks.
Args:
masks (Tensor): Binary mask of shape (n, h, w).
Returns:
Tensor: Bboxes with shape (n, 4) of the \
positive regions in the binary masks.
"""
N = masks.shape[0]
bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
x_any = torch.any(masks, dim=1)
y_any = torch.any(masks, dim=2)
for i in range(N):
x = torch.where(x_any[i, :])[0]
y = torch.where(y_any[i, :])[0]
if len(x) > 0 and len(y) > 0:
bboxes[i, :] = bboxes.new_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1])
return bboxes
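# --- Illustrative sketch (not part of the original module) ---
# mask2bbox returns per-mask [x_min, y_min, x_max, y_max] boxes with exclusive
# max edges; a mask covering rows 2-3 and columns 1-4 of a 6x6 grid therefore
# yields [1., 2., 5., 4.]:
if __name__ == '__main__':
    demo_masks = torch.zeros((1, 6, 6), dtype=torch.bool)
    demo_masks[0, 2:4, 1:5] = True
    print(mask2bbox(demo_masks))  # tensor([[1., 2., 5., 4.]])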
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In the dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor back into its original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = mmcv.slice_list(polys_single, polys_lens_single)
mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list): bitmap mask results.
Returns:
list | tuple: RLE encoded mask.
"""
encoded_mask_results = []
for mask in mask_results:
encoded_mask_results.append(
mask_util.encode(
np.array(mask[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
return encoded_mask_results
def mask2bbox(masks):
"""Obtain tight bounding boxes of binary masks.
Args:
masks (Tensor): Binary mask of shape (n, h, w).
Returns:
Tensor: Bboxes with shape (n, 4) of the \
positive regions in the binary masks.
"""
N = masks.shape[0]
bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
x_any = torch.any(masks, dim=1)
y_any = torch.any(masks, dim=2)
for i in range(N):
x = torch.where(x_any[i, :])[0]
y = torch.where(y_any[i, :])[0]
if len(x) > 0 and len(y) > 0:
bboxes[i, :] = bboxes.new_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1])
return bboxes
|