input | output
---|---
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.datasets import get_loading_pipeline, replace_ImageToTensor
def test_replace_ImageToTensor():
# with MultiScaleFlipAug
pipelines = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
with pytest.warns(UserWarning):
assert expected_pipelines == replace_ImageToTensor(pipelines)
# without MultiScaleFlipAug
pipelines = [
dict(type='LoadImageFromFile'),
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
]
with pytest.warns(UserWarning):
assert expected_pipelines == replace_ImageToTensor(pipelines)
def test_get_loading_pipeline():
pipelines = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
]
assert expected_pipelines == \
get_loading_pipeline(pipelines)
|
import pytest
from mmdet.datasets import get_loading_pipeline, replace_ImageToTensor
def test_replace_ImageToTensor():
# with MultiScaleFlipAug
pipelines = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
with pytest.warns(UserWarning):
assert expected_pipelines == replace_ImageToTensor(pipelines)
# without MultiScaleFlipAug
pipelines = [
dict(type='LoadImageFromFile'),
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
]
with pytest.warns(UserWarning):
assert expected_pipelines == replace_ImageToTensor(pipelines)
def test_get_loading_pipeline():
pipelines = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
]
assert expected_pipelines == \
get_loading_pipeline(pipelines)
|
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_sinusoid,
load_params,
save_wav,
skipIfNoExec,
TempDirMixin,
TestBaseMixin,
)
from torchaudio_unittest.common_utils.kaldi_utils import convert_args, run_kaldi
class Kaldi(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@skipIfNoExec("apply-cmvn-sliding")
def test_sliding_window_cmn(self):
"""sliding_window_cmn should be numerically compatible with apply-cmvn-sliding"""
kwargs = {
"cmn_window": 600,
"min_cmn_window": 100,
"center": False,
"norm_vars": False,
}
tensor = torch.randn(40, 10, dtype=self.dtype, device=self.device)
result = F.sliding_window_cmn(tensor, **kwargs)
command = ["apply-cmvn-sliding"] + convert_args(**kwargs) + ["ark:-", "ark:-"]
kaldi_result = run_kaldi(command, "ark", tensor)
self.assert_equal(result, expected=kaldi_result)
class KaldiCPUOnly(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@parameterized.expand(load_params("kaldi_test_pitch_args.jsonl"))
@skipIfNoExec("compute-kaldi-pitch-feats")
def test_pitch_feats(self, kwargs):
"""compute_kaldi_pitch produces numerically compatible result with compute-kaldi-pitch-feats"""
sample_rate = kwargs["sample_rate"]
waveform = get_sinusoid(dtype="float32", sample_rate=sample_rate)
result = F.compute_kaldi_pitch(waveform[0], **kwargs)
waveform = get_sinusoid(dtype="int16", sample_rate=sample_rate)
wave_file = self.get_temp_path("test.wav")
save_wav(wave_file, waveform, sample_rate)
command = ["compute-kaldi-pitch-feats"] + convert_args(**kwargs) + ["scp:-", "ark:-"]
kaldi_result = run_kaldi(command, "scp", wave_file)
self.assert_equal(result, expected=kaldi_result)
|
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_sinusoid,
load_params,
save_wav,
skipIfNoExec,
TempDirMixin,
TestBaseMixin,
)
from torchaudio_unittest.common_utils.kaldi_utils import (
convert_args,
run_kaldi,
)
class Kaldi(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@skipIfNoExec("apply-cmvn-sliding")
def test_sliding_window_cmn(self):
"""sliding_window_cmn should be numerically compatible with apply-cmvn-sliding"""
kwargs = {
"cmn_window": 600,
"min_cmn_window": 100,
"center": False,
"norm_vars": False,
}
tensor = torch.randn(40, 10, dtype=self.dtype, device=self.device)
result = F.sliding_window_cmn(tensor, **kwargs)
command = ["apply-cmvn-sliding"] + convert_args(**kwargs) + ["ark:-", "ark:-"]
kaldi_result = run_kaldi(command, "ark", tensor)
self.assert_equal(result, expected=kaldi_result)
class KaldiCPUOnly(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@parameterized.expand(load_params("kaldi_test_pitch_args.jsonl"))
@skipIfNoExec("compute-kaldi-pitch-feats")
def test_pitch_feats(self, kwargs):
"""compute_kaldi_pitch produces numerically compatible result with compute-kaldi-pitch-feats"""
sample_rate = kwargs["sample_rate"]
waveform = get_sinusoid(dtype="float32", sample_rate=sample_rate)
result = F.compute_kaldi_pitch(waveform[0], **kwargs)
waveform = get_sinusoid(dtype="int16", sample_rate=sample_rate)
wave_file = self.get_temp_path("test.wav")
save_wav(wave_file, waveform, sample_rate)
command = ["compute-kaldi-pitch-feats"] + convert_args(**kwargs) + ["scp:-", "ark:-"]
kaldi_result = run_kaldi(command, "scp", wave_file)
self.assert_equal(result, expected=kaldi_result)
|
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instance
from sklearn.utils._testing import SkipTest
class AllowNanEstimators(Directive):
@staticmethod
def make_paragraph_for_estimator_type(estimator_type):
intro = nodes.list_item()
intro += nodes.strong(text="Estimators that allow NaN values for type ")
intro += nodes.literal(text=f"{estimator_type}")
intro += nodes.strong(text=":\n")
exists = False
lst = nodes.bullet_list()
for name, est_class in all_estimators(type_filter=estimator_type):
with suppress(SkipTest):
est = _construct_instance(est_class)
if est._get_tags().get("allow_nan"):
module_name = ".".join(est_class.__module__.split(".")[:2])
class_title = f"{est_class.__name__}"
class_url = f"./generated/{module_name}.{class_title}.html"
item = nodes.list_item()
para = nodes.paragraph()
para += nodes.reference(
class_title, text=class_title, internal=False, refuri=class_url
)
exists = True
item += para
lst += item
intro += lst
return [intro] if exists else None
def run(self):
lst = nodes.bullet_list()
for i in ["cluster", "regressor", "classifier", "transformer"]:
item = self.make_paragraph_for_estimator_type(i)
if item is not None:
lst += item
return [lst]
def setup(app):
app.add_directive("allow_nan_estimators", AllowNanEstimators)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._testing import SkipTest
from sklearn.utils.estimator_checks import _construct_instance
class AllowNanEstimators(Directive):
@staticmethod
def make_paragraph_for_estimator_type(estimator_type):
intro = nodes.list_item()
intro += nodes.strong(text="Estimators that allow NaN values for type ")
intro += nodes.literal(text=f"{estimator_type}")
intro += nodes.strong(text=":\n")
exists = False
lst = nodes.bullet_list()
for name, est_class in all_estimators(type_filter=estimator_type):
with suppress(SkipTest):
est = _construct_instance(est_class)
if est._get_tags().get("allow_nan"):
module_name = ".".join(est_class.__module__.split(".")[:2])
class_title = f"{est_class.__name__}"
class_url = f"./generated/{module_name}.{class_title}.html"
item = nodes.list_item()
para = nodes.paragraph()
para += nodes.reference(
class_title, text=class_title, internal=False, refuri=class_url
)
exists = True
item += para
lst += item
intro += lst
return [intro] if exists else None
def run(self):
lst = nodes.bullet_list()
for i in ["cluster", "regressor", "classifier", "transformer"]:
item = self.make_paragraph_for_estimator_type(i)
if item is not None:
lst += item
return [lst]
def setup(app):
app.add_directive("allow_nan_estimators", AllowNanEstimators)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
import os
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as
from docarray import BaseDoc
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
VideoNdArray,
VideoTorchTensor,
)
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.video import VideoTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_video_tensor,cls_tensor',
[
(torch.zeros(1, 224, 224, 3), VideoTorchTensor, torch.Tensor),
(np.zeros((1, 224, 224, 3)), VideoNdArray, np.ndarray),
],
)
def test_set_video_tensor(tensor, cls_video_tensor, cls_tensor):
class MyVideoDoc(BaseDoc):
tensor: cls_video_tensor
doc = MyVideoDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_video_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_video_tensor_tensorflow():
class MyVideoDoc(BaseDoc):
tensor: VideoTensorFlowTensor
doc = MyVideoDoc(tensor=tf.zeros((1, 224, 224, 3)))
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1, 224, 224, 3)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, np.zeros((1, 224, 224, 3))),
(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, np.zeros((1, 224, 224, 3))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(VideoTensorFlowTensor, np.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, torch.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, torch.zeros(224, 3)),
(VideoTorchTensor, torch.zeros(1, 224, 224, 100)),
(VideoNdArray, 'hello'),
(VideoTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(
VideoTorchTensor,
torch.zeros(1, 224, 224, 3),
VideoTorchTensor._proto_type_name,
),
(VideoNdArray, np.zeros((1, 224, 224, 3)), VideoNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
proto = tensor._to_node_protobuf()
assert VideoTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_file(video_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_bytes(video_tensor, tmpdir):
b = video_tensor.to_bytes()
assert isinstance(b, bytes)
@pytest.mark.tensorflow
def test_save_video_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.randn(100, 1, 1024).to(torch.float32)),
parse_obj_as(AudioNdArray, np.random.randn(100, 1, 1024).astype('float32')),
],
)
def test_save_video_tensor_to_file_including_audio(video_tensor, audio_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file, audio_tensor=audio_tensor)
assert os.path.isfile(tmp_file)
|
import os
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as
from docarray import BaseDoc
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
VideoNdArray,
VideoTorchTensor,
)
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.video import VideoTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_video_tensor,cls_tensor',
[
(torch.zeros(1, 224, 224, 3), VideoTorchTensor, torch.Tensor),
(np.zeros((1, 224, 224, 3)), VideoNdArray, np.ndarray),
],
)
def test_set_video_tensor(tensor, cls_video_tensor, cls_tensor):
class MyVideoDoc(BaseDoc):
tensor: cls_video_tensor
doc = MyVideoDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_video_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_video_tensor_tensorflow():
class MyVideoDoc(BaseDoc):
tensor: VideoTensorFlowTensor
doc = MyVideoDoc(tensor=tf.zeros((1, 224, 224, 3)))
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1, 224, 224, 3)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, np.zeros((1, 224, 224, 3))),
(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, np.zeros((1, 224, 224, 3))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(VideoTensorFlowTensor, np.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, torch.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, torch.zeros(224, 3)),
(VideoTorchTensor, torch.zeros(1, 224, 224, 100)),
(VideoNdArray, 'hello'),
(VideoTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(
VideoTorchTensor,
torch.zeros(1, 224, 224, 3),
VideoTorchTensor._proto_type_name,
),
(VideoNdArray, np.zeros((1, 224, 224, 3)), VideoNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
proto = tensor._to_node_protobuf()
assert VideoTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_file(video_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_bytes(video_tensor, tmpdir):
b = video_tensor.to_bytes()
assert isinstance(b, bytes)
@pytest.mark.tensorflow
def test_save_video_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.randn(100, 1, 1024).to(torch.float32)),
parse_obj_as(AudioNdArray, np.random.randn(100, 1, 1024).astype('float32')),
],
)
def test_save_video_tensor_to_file_including_audio(video_tensor, audio_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file, audio_tensor=audio_tensor)
assert os.path.isfile(tmp_file)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import MyScale, MyScaleSettings
from langchain_community.vectorstores.myscale import MyScaleWithoutJSON
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MyScaleSettings": "langchain_community.vectorstores",
"MyScale": "langchain_community.vectorstores",
"MyScaleWithoutJSON": "langchain_community.vectorstores.myscale",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MyScale",
"MyScaleSettings",
"MyScaleWithoutJSON",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import MyScale, MyScaleSettings
from langchain_community.vectorstores.myscale import MyScaleWithoutJSON
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MyScaleSettings": "langchain_community.vectorstores",
"MyScale": "langchain_community.vectorstores",
"MyScaleWithoutJSON": "langchain_community.vectorstores.myscale",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MyScaleSettings",
"MyScale",
"MyScaleWithoutJSON",
]
|
from langchain_core.documents import Document
from langchain.retrievers.document_compressors.listwise_rerank import LLMListwiseRerank
def test_list_rerank() -> None:
from langchain_openai import ChatOpenAI
documents = [
Document("Sally is my friend from school"),
Document("Steve is my friend from home"),
Document("I didn't always like yogurt"),
Document("I wonder why it's called football"),
Document("Where's waldo"),
]
reranker = LLMListwiseRerank.from_llm(
llm=ChatOpenAI(model="gpt-3.5-turbo"),
top_n=3,
)
compressed_docs = reranker.compress_documents(documents, "Who is steve")
assert len(compressed_docs) == 3
assert "Steve" in compressed_docs[0].page_content
|
from langchain_core.documents import Document
from langchain.retrievers.document_compressors.listwise_rerank import LLMListwiseRerank
def test_list_rerank() -> None:
from langchain_openai import ChatOpenAI
documents = [
Document("Sally is my friend from school"),
Document("Steve is my friend from home"),
Document("I didn't always like yogurt"),
Document("I wonder why it's called football"),
Document("Where's waldo"),
]
reranker = LLMListwiseRerank.from_llm(
llm=ChatOpenAI(model="gpt-3.5-turbo"), top_n=3
)
compressed_docs = reranker.compress_documents(documents, "Who is steve")
assert len(compressed_docs) == 3
assert "Steve" in compressed_docs[0].page_content
|
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from box_sdk_gen import BoxClient
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_files_ai_extract_data,
add_extra_header_to_box_client,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
class BoxAIExtractToolSpec(BaseToolSpec):
"""
Extracts AI generated content from a Box file.
Args:
box_client (BoxClient): A BoxClient instance for interacting with Box API.
Attributes:
spec_functions (list): A list of supported functions.
_box_client (BoxClient): An instance of BoxClient for interacting with Box API.
Methods:
ai_extract(file_id, ai_prompt): Extracts AI generated content from a Box file.
Args:
file_id (str): The ID of the Box file.
ai_prompt (str): The AI prompt to use for extraction.
Returns:
Document: A Document object containing the extracted AI content.
"""
spec_functions = ["ai_extract"]
_box_client: BoxClient
def __init__(self, box_client: BoxClient) -> None:
"""
Initializes the BoxAIExtractToolSpec with a BoxClient instance.
Args:
box_client (BoxClient): The BoxClient instance to use for interacting with the Box API.
"""
self._box_client = add_extra_header_to_box_client(box_client)
def ai_extract(
self,
file_id: str,
ai_prompt: str,
) -> Document:
"""
Extracts AI generated content from a Box file using the provided AI prompt.
Args:
file_id (str): The ID of the Box file to process.
ai_prompt (str): The AI prompt to use for content extraction.
Returns:
Document: A Document object containing the extracted AI content,
including metadata about the original Box file.
"""
# Connect to Box
box_check_connection(self._box_client)
# get payload information
box_file = get_box_files_details(
box_client=self._box_client, file_ids=[file_id]
)[0]
box_file = get_files_ai_extract_data(
box_client=self._box_client,
box_files=[box_file],
ai_prompt=ai_prompt,
)[0]
doc = box_file_to_llama_document(box_file)
doc.text = box_file.ai_response if box_file.ai_response else ""
doc.metadata["ai_prompt"] = box_file.ai_prompt
doc.metadata["ai_response"] = box_file.ai_response
return doc
|
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from box_sdk_gen import BoxClient
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_files_ai_extract_data,
add_extra_header_to_box_client,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
class BoxAIExtractToolSpec(BaseToolSpec):
"""
Extracts AI generated content from a Box file.
Args:
box_client (BoxClient): A BoxClient instance for interacting with Box API.
Attributes:
spec_functions (list): A list of supported functions.
_box_client (BoxClient): An instance of BoxClient for interacting with Box API.
Methods:
ai_extract(file_id, ai_prompt): Extracts AI generated content from a Box file.
Args:
file_id (str): The ID of the Box file.
ai_prompt (str): The AI prompt to use for extraction.
Returns:
Document: A Document object containing the extracted AI content.
"""
spec_functions = ["ai_extract"]
_box_client: BoxClient
def __init__(self, box_client: BoxClient) -> None:
"""
Initializes the BoxAIExtractToolSpec with a BoxClient instance.
Args:
box_client (BoxClient): The BoxClient instance to use for interacting with the Box API.
"""
self._box_client = add_extra_header_to_box_client(box_client)
def ai_extract(
self,
file_id: str,
ai_prompt: str,
) -> Document:
"""
Extracts AI generated content from a Box file using the provided AI prompt.
Args:
file_id (str): The ID of the Box file to process.
ai_prompt (str): The AI prompt to use for content extraction.
Returns:
Document: A Document object containing the extracted AI content,
including metadata about the original Box file.
"""
# Connect to Box
box_check_connection(self._box_client)
# get payload information
box_file = get_box_files_details(
box_client=self._box_client, file_ids=[file_id]
)[0]
box_file = get_files_ai_extract_data(
box_client=self._box_client,
box_files=[box_file],
ai_prompt=ai_prompt,
)[0]
doc = box_file_to_llama_document(box_file)
doc.text = box_file.ai_response if box_file.ai_response else ""
doc.metadata["ai_prompt"] = box_file.ai_prompt
doc.metadata["ai_response"] = box_file.ai_response
return doc
|
import os.path
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .vision import VisionDataset
class CocoDetection(VisionDataset):
"""`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
def __init__(
self,
root: str,
annFile: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms, transform, target_transform)
from pycocotools.coco import COCO
self.coco = COCO(annFile)
self.ids = list(sorted(self.coco.imgs.keys()))
def _load_image(self, id: int) -> Image.Image:
path = self.coco.loadImgs(id)[0]["file_name"]
return Image.open(os.path.join(self.root, path)).convert("RGB")
def _load_target(self, id: int) -> List[Any]:
return self.coco.loadAnns(self.coco.getAnnIds(id))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
id = self.ids[index]
image = self._load_image(id)
target = self._load_target(id)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self) -> int:
return len(self.ids)
class CocoCaptions(CocoDetection):
"""`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.PILToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def _load_target(self, id: int) -> List[str]:
return [ann["caption"] for ann in super()._load_target(id)]
|
import os.path
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .vision import VisionDataset
class CocoDetection(VisionDataset):
"""`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
def __init__(
self,
root: str,
annFile: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms, transform, target_transform)
from pycocotools.coco import COCO
self.coco = COCO(annFile)
self.ids = list(sorted(self.coco.imgs.keys()))
def _load_image(self, id: int) -> Image.Image:
path = self.coco.loadImgs(id)[0]["file_name"]
return Image.open(os.path.join(self.root, path)).convert("RGB")
def _load_target(self, id: int) -> List[Any]:
return self.coco.loadAnns(self.coco.getAnnIds(id))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
id = self.ids[index]
image = self._load_image(id)
target = self._load_target(id)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self) -> int:
return len(self.ids)
class CocoCaptions(CocoDetection):
"""`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.PILToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def _load_target(self, id: int) -> List[str]:
return [ann["caption"] for ann in super()._load_target(id)]
|
import os
import pytest
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.mcp import BasicMCPClient, McpToolSpec
# Path to the test server script - adjust as needed
SERVER_SCRIPT = os.path.join(os.path.dirname(__file__), "server.py")
@pytest.fixture(scope="session")
def client() -> BasicMCPClient:
"""Create a basic MCP client connected to the test server."""
return BasicMCPClient("python", args=[SERVER_SCRIPT], timeout=5)
def test_class():
names_of_base_classes = [b.__name__ for b in McpToolSpec.__mro__]
assert BaseToolSpec.__name__ in names_of_base_classes
def test_get_tools(client: BasicMCPClient):
tool_spec = McpToolSpec(client)
tools = tool_spec.to_tool_list()
assert len(tools) > 0
tool_spec = McpToolSpec(client, include_resources=True)
tools_plus_resources = tool_spec.to_tool_list()
assert len(tools_plus_resources) > len(tools)
@pytest.mark.asyncio
async def test_get_tools_async(client: BasicMCPClient):
tool_spec = McpToolSpec(client)
tools = await tool_spec.to_tool_list_async()
assert len(tools) > 0
tool_spec = McpToolSpec(client, include_resources=True)
tools_plus_resources = await tool_spec.to_tool_list_async()
assert len(tools_plus_resources) > len(tools)
def test_get_single_tool(client: BasicMCPClient):
tool_spec = McpToolSpec(client, allowed_tools=["echo"])
tools = tool_spec.to_tool_list()
assert len(tools) == 1
assert tools[0].metadata.name == "echo"
@pytest.mark.asyncio
async def test_get_single_tool_async(client: BasicMCPClient):
tool_spec = McpToolSpec(client, allowed_tools=["echo"])
tools = await tool_spec.to_tool_list_async()
assert len(tools) == 1
assert tools[0].metadata.name == "echo"
def test_get_zero_tools(client: BasicMCPClient):
tool_spec = McpToolSpec(client, allowed_tools=[])
tools = tool_spec.to_tool_list()
assert len(tools) == 0
@pytest.mark.asyncio
async def test_get_zero_tools_async(client: BasicMCPClient):
tool_spec = McpToolSpec(client, allowed_tools=[])
tools = await tool_spec.to_tool_list_async()
assert len(tools) == 0
|
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.mcp import McpToolSpec
def test_class():
names_of_base_classes = [b.__name__ for b in McpToolSpec.__mro__]
assert BaseToolSpec.__name__ in names_of_base_classes
|
import pytest
from xgboost import testing as tm
pytestmark = [
pytest.mark.skipif(**tm.no_spark()),
tm.timeout(120),
]
from ..test_with_spark.test_data import run_dmatrix_ctor
@pytest.mark.skipif(**tm.no_cudf())
@pytest.mark.parametrize(
"is_feature_cols,is_qdm",
[(True, True), (True, False), (False, True), (False, False)],
)
def test_dmatrix_ctor(is_feature_cols: bool, is_qdm: bool) -> None:
run_dmatrix_ctor(is_feature_cols, is_qdm, on_gpu=True)
|
import pytest
from xgboost import testing as tm
pytestmark = pytest.mark.skipif(**tm.no_spark())
from ..test_with_spark.test_data import run_dmatrix_ctor
@pytest.mark.skipif(**tm.no_cudf())
@pytest.mark.parametrize(
"is_feature_cols,is_qdm",
[(True, True), (True, False), (False, True), (False, False)],
)
def test_dmatrix_ctor(is_feature_cols: bool, is_qdm: bool) -> None:
run_dmatrix_ctor(is_feature_cols, is_qdm, on_gpu=True)
|
from contextlib import asynccontextmanager
from datetime import timedelta
from typing import Optional, List, Dict
from urllib.parse import urlparse
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client, StdioServerParameters
class BasicMCPClient(ClientSession):
"""
Basic MCP client that can be used to connect to an MCP server.
This is useful for verifying that the MCP server which implements `FastMCP` is working.
Args:
command_or_url: The command to run or the URL to connect to.
args: The arguments to pass to StdioServerParameters.
env: The environment variables to set for StdioServerParameters.
timeout: The timeout for the command in seconds.
"""
def __init__(
self,
command_or_url: str,
args: Optional[List[str]] = None,
env: Optional[Dict[str, str]] = None,
timeout: int = 30,
):
self.command_or_url = command_or_url
self.args = args or []
self.env = env or {}
self.timeout = timeout
@asynccontextmanager
async def _run_session(self):
if urlparse(self.command_or_url).scheme in ("http", "https"):
async with sse_client(self.command_or_url) as streams:
async with ClientSession(
*streams, read_timeout_seconds=timedelta(seconds=self.timeout)
) as session:
await session.initialize()
yield session
else:
server_parameters = StdioServerParameters(
command=self.command_or_url, args=self.args, env=self.env
)
async with stdio_client(server_parameters) as streams:
async with ClientSession(
*streams, read_timeout_seconds=timedelta(seconds=self.timeout)
) as session:
await session.initialize()
yield session
async def call_tool(self, tool_name: str, arguments: dict):
async with self._run_session() as session:
return await session.call_tool(tool_name, arguments)
async def list_tools(self):
async with self._run_session() as session:
return await session.list_tools()
|
from contextlib import asynccontextmanager
from datetime import timedelta
from typing import Optional, List, Dict
from urllib.parse import urlparse
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client, StdioServerParameters
class BasicMCPClient(ClientSession):
"""
Basic MCP client that can be used to connect to an MCP server.
This is useful for verifying that the MCP server which implements `FastMCP` is working.
Args:
command_or_url: The command to run or the URL to connect to.
args: The arguments to pass to StdioServerParameters.
env: The environment variables to set for StdioServerParameters.
timeout: The timeout for the command in seconds.
"""
def __init__(
self,
command_or_url: str,
args: Optional[List[str]] = None,
env: Optional[Dict[str, str]] = None,
timeout: int = 30,
):
self.command_or_url = command_or_url
self.args = args or []
self.env = env or {}
self.timeout = timeout
@asynccontextmanager
async def _run_session(self):
if urlparse(self.command_or_url).scheme in ("http", "https"):
async with sse_client(self.command_or_url) as streams:
async with ClientSession(
*streams, read_timeout_seconds=timedelta(seconds=self.timeout)
) as session:
await session.initialize()
yield session
else:
server_parameters = StdioServerParameters(
command=self.command_or_url, args=self.args, env=self.env
)
async with stdio_client(server_parameters) as streams:
async with ClientSession(
*streams, read_timeout_seconds=timedelta(seconds=self.timeout)
) as session:
await session.initialize()
yield session
async def call_tool(self, tool_name: str, arguments: dict):
async with self._run_session() as session:
return await session.call_tool(tool_name, arguments)
async def list_tools(self):
async with self._run_session() as session:
return await session.list_tools()
|
import json
import os
from typing import List
import torch
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"))
model = LSTM(**config)
model.load_state_dict(weights)
return model
|
import torch
from torch import nn
from typing import List
import os
import json
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"))
model = LSTM(**config)
model.load_state_dict(weights)
return model
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
# MMEngine supports the following two ways; users can choose
# according to convenience
# optim_wrapper = dict(type='AmpOptimWrapper')
_base_.optim_wrapper.type = 'AmpOptimWrapper'
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'setup_multi_processes'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
__all__ = [
'get_root_logger',
'collect_env',
'find_latest_checkpoint',
]
|
from __future__ import annotations
import csv
import os
from . import InputExample
class TripletReader(object):
"""Reads in the a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1),
one positive example (s2) and one negative example (s3)
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
s3_col_idx=2,
has_header=False,
delimiter="\t",
quoting=csv.QUOTE_NONE,
):
self.dataset_folder = dataset_folder
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.s3_col_idx = s3_col_idx
self.has_header = has_header
self.delimiter = delimiter
self.quoting = quoting
def get_examples(self, filename, max_examples=0):
data = csv.reader(
open(os.path.join(self.dataset_folder, filename), encoding="utf-8"),
delimiter=self.delimiter,
quoting=self.quoting,
)
examples = []
if self.has_header:
next(data)
for id, row in enumerate(data):
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
s3 = row[self.s3_col_idx]
examples.append(InputExample(texts=[s1, s2, s3]))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
import csv
import os
from . import InputExample
class TripletReader(object):
"""Reads in the a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1),
one positive example (s2) and one negative example (s3)
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
s3_col_idx=2,
has_header=False,
delimiter="\t",
quoting=csv.QUOTE_NONE,
):
self.dataset_folder = dataset_folder
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.s3_col_idx = s3_col_idx
self.has_header = has_header
self.delimiter = delimiter
self.quoting = quoting
def get_examples(self, filename, max_examples=0):
data = csv.reader(
open(os.path.join(self.dataset_folder, filename), encoding="utf-8"),
delimiter=self.delimiter,
quoting=self.quoting,
)
examples = []
if self.has_header:
next(data)
for id, row in enumerate(data):
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
s3 = row[self.s3_col_idx]
examples.append(InputExample(texts=[s1, s2, s3]))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
if torch.jit.is_scripting():
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal
@_register_explicit_noop(datapoints.Mask, datapoints.BoundingBoxes, warn_passthrough=True)
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
if torch.jit.is_scripting():
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
from mmengine.logging import print_log
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
parser.add_argument(
'--save-keys',
nargs='+',
type=str,
default=['meta', 'state_dict'],
help='keys to save in the published checkpoint')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file, save_keys=['meta', 'state_dict']):
checkpoint = torch.load(in_file, map_location='cpu')
# only keep `meta` and `state_dict` for smaller file size
ckpt_keys = list(checkpoint.keys())
for k in ckpt_keys:
if k not in save_keys:
print_log(
f'Key `{k}` will be removed because it is not in '
f'save_keys. If you want to keep it, '
f'please set --save-keys.',
logger='current')
checkpoint.pop(k, None)
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
print_log(
f'The published model is saved at {final_file}.', logger='current')
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file, args.save_keys)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response
from .functional import add_noise, barkscale_fbanks, convolve, deemphasis, fftconvolve, preemphasis, speed
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"deemphasis",
"extend_pitch",
"fftconvolve",
"oscillator_bank",
"preemphasis",
"sinc_impulse_response",
"speed",
]
|
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve, speed
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"extend_pitch",
"fftconvolve",
"oscillator_bank",
"sinc_impulse_response",
"speed",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper,
DefaultOptimWrapperConstructor, OptimWrapper,
OptimWrapperDict, ZeroRedundancyOptimizer,
build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'ApexOptimWrapper',
'OptimWrapperDict', 'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR',
'PolyMomentum', 'PolyParamScheduler', 'ReduceOnPlateauLR',
'ReduceOnPlateauMomentum', 'ReduceOnPlateauParamScheduler',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper,
DeepSpeedOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict,
ZeroRedundancyOptimizer, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'ApexOptimWrapper',
'OptimWrapperDict', 'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR',
'PolyMomentum', 'PolyParamScheduler', 'ReduceOnPlateauLR',
'ReduceOnPlateauMomentum', 'ReduceOnPlateauParamScheduler',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper', 'DeepSpeedOptimWrapper'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky
from keras.src.ops.linalg import det
from keras.src.ops.linalg import eig
from keras.src.ops.linalg import eigh
from keras.src.ops.linalg import inv
from keras.src.ops.linalg import lstsq
from keras.src.ops.linalg import lu_factor
from keras.src.ops.linalg import norm
from keras.src.ops.linalg import qr
from keras.src.ops.linalg import solve
from keras.src.ops.linalg import solve_triangular
from keras.src.ops.linalg import svd
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky
from keras.src.ops.linalg import det
from keras.src.ops.linalg import eig
from keras.src.ops.linalg import eigh
from keras.src.ops.linalg import inv
from keras.src.ops.linalg import lu_factor
from keras.src.ops.linalg import norm
from keras.src.ops.linalg import qr
from keras.src.ops.linalg import solve
from keras.src.ops.linalg import solve_triangular
from keras.src.ops.linalg import svd
|
from typing import Any, Optional, Sequence
from langchain_core._api.deprecation import deprecated
from langchain_core.documents import BaseDocumentTransformer, Document
from langchain_community.utilities.vertexai import get_client_info
@deprecated(
since="0.0.32",
removal="1.0",
alternative_import="langchain_google_community.DocAIParser",
)
class GoogleTranslateTransformer(BaseDocumentTransformer):
"""Translate text documents using Google Cloud Translation."""
def __init__(
self,
project_id: str,
*,
location: str = "global",
model_id: Optional[str] = None,
glossary_id: Optional[str] = None,
api_endpoint: Optional[str] = None,
) -> None:
"""
Arguments:
project_id: Google Cloud Project ID.
location: (Optional) Translate model location.
model_id: (Optional) Translate model ID to use.
glossary_id: (Optional) Translate glossary ID to use.
api_endpoint: (Optional) Regional endpoint to use.
"""
try:
from google.api_core.client_options import ClientOptions
from google.cloud import translate
except ImportError as exc:
raise ImportError(
"Install Google Cloud Translate to use this parser."
"(pip install google-cloud-translate)"
) from exc
self.project_id = project_id
self.location = location
self.model_id = model_id
self.glossary_id = glossary_id
self._client = translate.TranslationServiceClient(
client_info=get_client_info("translate"),
client_options=(
ClientOptions(api_endpoint=api_endpoint) if api_endpoint else None
),
)
self._parent_path = self._client.common_location_path(project_id, location)
# For some reason, there's no `model_path()` method for the client.
self._model_path = (
f"{self._parent_path}/models/{model_id}" if model_id else None
)
self._glossary_path = (
self._client.glossary_path(project_id, location, glossary_id)
if glossary_id
else None
)
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Translate text documents using Google Translate.
Arguments:
source_language_code: ISO 639 language code of the input document.
target_language_code: ISO 639 language code of the output document.
For supported languages, refer to:
https://cloud.google.com/translate/docs/languages
mime_type: (Optional) Media Type of input text.
Options: `text/plain`, `text/html`
"""
try:
from google.cloud import translate
except ImportError as exc:
raise ImportError(
"Install Google Cloud Translate to use this parser."
"(pip install google-cloud-translate)"
) from exc
response = self._client.translate_text(
request=translate.TranslateTextRequest(
contents=[doc.page_content for doc in documents],
parent=self._parent_path,
model=self._model_path,
glossary_config=translate.TranslateTextGlossaryConfig(
glossary=self._glossary_path
),
source_language_code=kwargs.get("source_language_code", None),
target_language_code=kwargs.get("target_language_code"),
mime_type=kwargs.get("mime_type", "text/plain"),
)
)
# If using a glossary, the translations will be in `glossary_translations`.
translations = response.glossary_translations or response.translations
return [
Document(
page_content=translation.translated_text,
metadata={
**doc.metadata,
"model": translation.model,
"detected_language_code": translation.detected_language_code,
},
)
for doc, translation in zip(documents, translations)
]
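# Hedged usage sketch (the project ID and language codes are placeholders; valid
# Google Cloud credentials are assumed to be configured in the environment):
#
#   translator = GoogleTranslateTransformer(project_id="my-gcp-project")
#   docs = [Document(page_content="Bonjour le monde")]
#   translated = translator.transform_documents(
#       docs, source_language_code="fr", target_language_code="en"
#   )
#   print(translated[0].page_content)  # an English translation of the input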
|
from typing import Any, Optional, Sequence
from langchain_core._api.deprecation import deprecated
from langchain_core.documents import BaseDocumentTransformer, Document
from langchain_community.utilities.vertexai import get_client_info
@deprecated(
since="0.0.32",
removal="1.0",
alternative_import="langchain_google_community.DocAIParser",
)
class GoogleTranslateTransformer(BaseDocumentTransformer):
"""Translate text documents using Google Cloud Translation."""
def __init__(
self,
project_id: str,
*,
location: str = "global",
model_id: Optional[str] = None,
glossary_id: Optional[str] = None,
api_endpoint: Optional[str] = None,
) -> None:
"""
Arguments:
project_id: Google Cloud Project ID.
location: (Optional) Translate model location.
model_id: (Optional) Translate model ID to use.
glossary_id: (Optional) Translate glossary ID to use.
api_endpoint: (Optional) Regional endpoint to use.
"""
try:
from google.api_core.client_options import ClientOptions
from google.cloud import translate # type: ignore[attr-defined]
except ImportError as exc:
raise ImportError(
"Install Google Cloud Translate to use this parser."
"(pip install google-cloud-translate)"
) from exc
self.project_id = project_id
self.location = location
self.model_id = model_id
self.glossary_id = glossary_id
self._client = translate.TranslationServiceClient(
client_info=get_client_info("translate"),
client_options=(
ClientOptions(api_endpoint=api_endpoint) if api_endpoint else None
),
)
self._parent_path = self._client.common_location_path(project_id, location)
# For some reason, there's no `model_path()` method for the client.
self._model_path = (
f"{self._parent_path}/models/{model_id}" if model_id else None
)
self._glossary_path = (
self._client.glossary_path(project_id, location, glossary_id)
if glossary_id
else None
)
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Translate text documents using Google Translate.
Arguments:
source_language_code: ISO 639 language code of the input document.
target_language_code: ISO 639 language code of the output document.
For supported languages, refer to:
https://cloud.google.com/translate/docs/languages
mime_type: (Optional) Media Type of input text.
Options: `text/plain`, `text/html`
"""
try:
from google.cloud import translate # type: ignore[attr-defined]
except ImportError as exc:
raise ImportError(
"Install Google Cloud Translate to use this parser."
"(pip install google-cloud-translate)"
) from exc
response = self._client.translate_text(
request=translate.TranslateTextRequest(
contents=[doc.page_content for doc in documents],
parent=self._parent_path,
model=self._model_path,
glossary_config=translate.TranslateTextGlossaryConfig(
glossary=self._glossary_path
),
source_language_code=kwargs.get("source_language_code", None),
target_language_code=kwargs.get("target_language_code"),
mime_type=kwargs.get("mime_type", "text/plain"),
)
)
# If using a glossary, the translations will be in `glossary_translations`.
translations = response.glossary_translations or response.translations
return [
Document(
page_content=translation.translated_text,
metadata={
**doc.metadata,
"model": translation.model,
"detected_language_code": translation.detected_language_code,
},
)
for doc, translation in zip(documents, translations)
]
|
from __future__ import annotations
from typing import Any
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.output_parsers.json import parse_and_check_json_markdown
from pydantic import BaseModel
from langchain.output_parsers.format_instructions import (
STRUCTURED_FORMAT_INSTRUCTIONS,
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS,
)
line_template = '\t"{name}": {type} // {description}'
class ResponseSchema(BaseModel):
"""Schema for a response from a structured output parser."""
name: str
"""The name of the schema."""
description: str
"""The description of the schema."""
type: str = "string"
"""The type of the response."""
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name, description=schema.description, type=schema.type
)
class StructuredOutputParser(BaseOutputParser[dict[str, Any]]):
"""Parse the output of an LLM call to a structured output."""
response_schemas: list[ResponseSchema]
"""The schemas for the response."""
@classmethod
def from_response_schemas(
cls, response_schemas: list[ResponseSchema]
) -> StructuredOutputParser:
return cls(response_schemas=response_schemas)
def get_format_instructions(
self,
only_json: bool = False, # noqa: FBT001,FBT002
) -> str:
"""Get format instructions for the output parser.
example:
```python
from langchain.output_parsers.structured import (
StructuredOutputParser, ResponseSchema
)
response_schemas = [
ResponseSchema(
name="foo",
description="a list of strings",
type="List[string]"
),
ResponseSchema(
name="bar",
description="a string",
type="string"
),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
print(parser.get_format_instructions()) # noqa: T201
output:
# The output should be a Markdown code snippet formatted in the following
# schema, including the leading and trailing "```json" and "```":
#
# ```json
# {
# "foo": List[string] // a list of strings
# "bar": string // a string
# }
# ```
Args:
only_json (bool): If True, only the json in the Markdown code snippet
                will be returned, without the introductory text. Defaults to False.
"""
schema_str = "\n".join(
[_get_sub_string(schema) for schema in self.response_schemas]
)
if only_json:
return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str)
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
def parse(self, text: str) -> dict[str, Any]:
expected_keys = [rs.name for rs in self.response_schemas]
return parse_and_check_json_markdown(text, expected_keys)
@property
def _type(self) -> str:
return "structured"
|
from __future__ import annotations
from typing import Any
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.output_parsers.json import parse_and_check_json_markdown
from pydantic import BaseModel
from langchain.output_parsers.format_instructions import (
STRUCTURED_FORMAT_INSTRUCTIONS,
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS,
)
line_template = '\t"{name}": {type} // {description}'
class ResponseSchema(BaseModel):
"""Schema for a response from a structured output parser."""
name: str
"""The name of the schema."""
description: str
"""The description of the schema."""
type: str = "string"
"""The type of the response."""
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name, description=schema.description, type=schema.type
)
class StructuredOutputParser(BaseOutputParser[dict[str, Any]]):
"""Parse the output of an LLM call to a structured output."""
response_schemas: list[ResponseSchema]
"""The schemas for the response."""
@classmethod
def from_response_schemas(
cls, response_schemas: list[ResponseSchema]
) -> StructuredOutputParser:
return cls(response_schemas=response_schemas)
def get_format_instructions(self, only_json: bool = False) -> str:
"""Get format instructions for the output parser.
example:
```python
from langchain.output_parsers.structured import (
StructuredOutputParser, ResponseSchema
)
response_schemas = [
ResponseSchema(
name="foo",
description="a list of strings",
type="List[string]"
),
ResponseSchema(
name="bar",
description="a string",
type="string"
),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
print(parser.get_format_instructions()) # noqa: T201
output:
# The output should be a Markdown code snippet formatted in the following
# schema, including the leading and trailing "```json" and "```":
#
# ```json
# {
# "foo": List[string] // a list of strings
# "bar": string // a string
# }
# ```
Args:
only_json (bool): If True, only the json in the Markdown code snippet
                will be returned, without the introductory text. Defaults to False.
"""
schema_str = "\n".join(
[_get_sub_string(schema) for schema in self.response_schemas]
)
if only_json:
return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str)
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
def parse(self, text: str) -> dict[str, Any]:
expected_keys = [rs.name for rs in self.response_schemas]
return parse_and_check_json_markdown(text, expected_keys)
@property
def _type(self) -> str:
return "structured"
|
import os
import grpc
import pytest
from jina import Flow, __default_host__
from jina.clients import Client
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime
from jina.serve.runtimes.helper import _get_grpc_server_options
from tests import random_docs
@pytest.fixture(scope='function')
def flow_with_grpc(monkeypatch):
class AuthInterceptor(grpc.aio.ServerInterceptor):
def __init__(self, key):
self._valid_metadata = ('rpc-auth-header', key)
def deny(_, context):
context.abort(grpc.StatusCode.UNAUTHENTICATED, 'Invalid key')
self._deny = grpc.unary_unary_rpc_method_handler(deny)
async def intercept_service(self, continuation, handler_call_details):
meta = handler_call_details.invocation_metadata
metas_dicts = {m.key: m.value for m in meta}
assert 'rpc-auth-header' in metas_dicts
assert (
metas_dicts['rpc-auth-header'] == 'access_key'
), f'Invalid access key detected, got {metas_dicts["rpc-auth-header"]}'
for m in meta:
if m == self._valid_metadata:
return await continuation(handler_call_details)
return self._deny
class AlternativeGRPCGatewayRuntime(_GRPCGatewayRuntime):
async def async_setup(self):
"""
The async method to setup.
Create the gRPC server and expose the port for communication.
"""
if not self.args.proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
if not (is_port_free(__default_host__, self.args.port)):
raise PortAlreadyUsed(f'port:{self.args.port}')
self.server = grpc.aio.server(
interceptors=(AuthInterceptor('access_key'),),
options=_get_grpc_server_options(self.args.grpc_server_options),
)
await self._async_setup_server()
monkeypatch.setattr(
'jina.serve.runtimes.gateway.grpc.GRPCGatewayRuntime',
AlternativeGRPCGatewayRuntime,
)
return Flow(protocol='grpc').add()
def test_client_grpc_kwargs(flow_with_grpc):
with flow_with_grpc:
client = Client(
port=flow_with_grpc.port,
host='localhost',
protocol='grpc',
)
meta_data = (('rpc-auth-header', 'invalid_access_key'),)
try:
client.post('', random_docs(1), request_size=1, metadata=meta_data)
except Exception as exc:
assert 'Invalid access key detected, got invalid_access_key' in repr(exc)
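# For reference, a request that passes the interceptor would send the valid key
# instead (a hedged sketch, mirroring the metadata format used above):
#   client.post('', random_docs(1), request_size=1,
#               metadata=(('rpc-auth-header', 'access_key'),))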
|
import os
import grpc
import pytest
from jina import Flow, __default_host__
from jina.clients import Client
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime
from tests import random_docs
@pytest.fixture(scope='function')
def flow_with_grpc(monkeypatch):
class AuthInterceptor(grpc.aio.ServerInterceptor):
def __init__(self, key):
self._valid_metadata = ('rpc-auth-header', key)
def deny(_, context):
context.abort(grpc.StatusCode.UNAUTHENTICATED, 'Invalid key')
self._deny = grpc.unary_unary_rpc_method_handler(deny)
async def intercept_service(self, continuation, handler_call_details):
meta = handler_call_details.invocation_metadata
metas_dicts = {m.key: m.value for m in meta}
assert 'rpc-auth-header' in metas_dicts
assert (
metas_dicts['rpc-auth-header'] == 'access_key'
), f'Invalid access key detected, got {metas_dicts["rpc-auth-header"]}'
for m in meta:
if m == self._valid_metadata:
return await continuation(handler_call_details)
return self._deny
class AlternativeGRPCGatewayRuntime(_GRPCGatewayRuntime):
async def async_setup(self):
"""
The async method to setup.
Create the gRPC server and expose the port for communication.
"""
if not self.args.proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
if not (is_port_free(__default_host__, self.args.port)):
raise PortAlreadyUsed(f'port:{self.args.port}')
self.server = grpc.aio.server(
interceptors=(AuthInterceptor('access_key'),),
options=[
('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
],
)
await self._async_setup_server()
monkeypatch.setattr(
'jina.serve.runtimes.gateway.grpc.GRPCGatewayRuntime',
AlternativeGRPCGatewayRuntime,
)
return Flow(protocol='grpc').add()
def test_client_grpc_kwargs(flow_with_grpc):
with flow_with_grpc:
client = Client(
port=flow_with_grpc.port,
host='localhost',
protocol='grpc',
)
meta_data = (('rpc-auth-header', 'invalid_access_key'),)
try:
client.post('', random_docs(1), request_size=1, metadata=meta_data)
except Exception as exc:
assert 'Invalid access key detected, got invalid_access_key' in repr(exc)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pandas as pd
import torch
from datasets import load_dataset
from huggingface_hub.utils import insecure_hashlib
from tqdm.auto import tqdm
from transformers import T5EncoderModel
from diffusers import FluxPipeline
MAX_SEQ_LENGTH = 77
OUTPUT_PATH = "embeddings.parquet"
def generate_image_hash(image):
return insecure_hashlib.sha256(image.tobytes()).hexdigest()
def load_flux_dev_pipeline():
id = "black-forest-labs/FLUX.1-dev"
text_encoder = T5EncoderModel.from_pretrained(id, subfolder="text_encoder_2", load_in_8bit=True, device_map="auto")
pipeline = FluxPipeline.from_pretrained(
id, text_encoder_2=text_encoder, transformer=None, vae=None, device_map="balanced"
)
return pipeline
@torch.no_grad()
def compute_embeddings(pipeline, prompts, max_sequence_length):
all_prompt_embeds = []
all_pooled_prompt_embeds = []
all_text_ids = []
for prompt in tqdm(prompts, desc="Encoding prompts."):
(
prompt_embeds,
pooled_prompt_embeds,
text_ids,
) = pipeline.encode_prompt(prompt=prompt, prompt_2=None, max_sequence_length=max_sequence_length)
all_prompt_embeds.append(prompt_embeds)
all_pooled_prompt_embeds.append(pooled_prompt_embeds)
all_text_ids.append(text_ids)
max_memory = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
print(f"Max memory allocated: {max_memory:.3f} GB")
return all_prompt_embeds, all_pooled_prompt_embeds, all_text_ids
def run(args):
dataset = load_dataset("Norod78/Yarn-art-style", split="train")
image_prompts = {generate_image_hash(sample["image"]): sample["text"] for sample in dataset}
all_prompts = list(image_prompts.values())
print(f"{len(all_prompts)=}")
pipeline = load_flux_dev_pipeline()
all_prompt_embeds, all_pooled_prompt_embeds, all_text_ids = compute_embeddings(
pipeline, all_prompts, args.max_sequence_length
)
data = []
for i, (image_hash, _) in enumerate(image_prompts.items()):
data.append((image_hash, all_prompt_embeds[i], all_pooled_prompt_embeds[i], all_text_ids[i]))
print(f"{len(data)=}")
# Create a DataFrame
embedding_cols = ["prompt_embeds", "pooled_prompt_embeds", "text_ids"]
df = pd.DataFrame(data, columns=["image_hash"] + embedding_cols)
print(f"{len(df)=}")
    # Flatten embedding tensors to plain lists (for proper storage in parquet)
for col in embedding_cols:
df[col] = df[col].apply(lambda x: x.cpu().numpy().flatten().tolist())
# Save the dataframe to a parquet file
df.to_parquet(args.output_path)
print(f"Data successfully serialized to {args.output_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--max_sequence_length",
type=int,
default=MAX_SEQ_LENGTH,
help="Maximum sequence length to use for computing the embeddings. The more the higher computational costs.",
)
parser.add_argument("--output_path", type=str, default=OUTPUT_PATH, help="Path to serialize the parquet file.")
args = parser.parse_args()
run(args)
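# Hedged read-back sketch (assumes the parquet file produced above; the reshape
# dimensions are illustrative and must match the pipeline's true embedding shape):
#   import pandas as pd
#   import torch
#   df = pd.read_parquet("embeddings.parquet")
#   row = df.iloc[0]
#   prompt_embeds = torch.tensor(row["prompt_embeds"])  # stored as a flat list
#   # e.g. prompt_embeds = prompt_embeds.reshape(1, seq_len, -1) to recover the
#   # original (batch, seq_len, hidden) layout, if those dims are known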
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pandas as pd
import torch
from datasets import load_dataset
from huggingface_hub.utils import insecure_hashlib
from tqdm.auto import tqdm
from transformers import T5EncoderModel
from diffusers import FluxPipeline
MAX_SEQ_LENGTH = 77
OUTPUT_PATH = "embeddings.parquet"
def generate_image_hash(image):
return insecure_hashlib.sha256(image.tobytes()).hexdigest()
def load_flux_dev_pipeline():
id = "black-forest-labs/FLUX.1-dev"
text_encoder = T5EncoderModel.from_pretrained(id, subfolder="text_encoder_2", load_in_8bit=True, device_map="auto")
pipeline = FluxPipeline.from_pretrained(
id, text_encoder_2=text_encoder, transformer=None, vae=None, device_map="balanced"
)
return pipeline
@torch.no_grad()
def compute_embeddings(pipeline, prompts, max_sequence_length):
all_prompt_embeds = []
all_pooled_prompt_embeds = []
all_text_ids = []
for prompt in tqdm(prompts, desc="Encoding prompts."):
(
prompt_embeds,
pooled_prompt_embeds,
text_ids,
) = pipeline.encode_prompt(prompt=prompt, prompt_2=None, max_sequence_length=max_sequence_length)
all_prompt_embeds.append(prompt_embeds)
all_pooled_prompt_embeds.append(pooled_prompt_embeds)
all_text_ids.append(text_ids)
max_memory = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
print(f"Max memory allocated: {max_memory:.3f} GB")
return all_prompt_embeds, all_pooled_prompt_embeds, all_text_ids
def run(args):
dataset = load_dataset("Norod78/Yarn-art-style", split="train")
image_prompts = {generate_image_hash(sample["image"]): sample["text"] for sample in dataset}
all_prompts = list(image_prompts.values())
print(f"{len(all_prompts)=}")
pipeline = load_flux_dev_pipeline()
all_prompt_embeds, all_pooled_prompt_embeds, all_text_ids = compute_embeddings(
pipeline, all_prompts, args.max_sequence_length
)
data = []
for i, (image_hash, _) in enumerate(image_prompts.items()):
data.append((image_hash, all_prompt_embeds[i], all_pooled_prompt_embeds[i], all_text_ids[i]))
print(f"{len(data)=}")
# Create a DataFrame
embedding_cols = ["prompt_embeds", "pooled_prompt_embeds", "text_ids"]
df = pd.DataFrame(data, columns=["image_hash"] + embedding_cols)
print(f"{len(df)=}")
    # Flatten embedding tensors to plain lists (for proper storage in parquet)
for col in embedding_cols:
df[col] = df[col].apply(lambda x: x.cpu().numpy().flatten().tolist())
# Save the dataframe to a parquet file
df.to_parquet(args.output_path)
print(f"Data successfully serialized to {args.output_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--max_sequence_length",
type=int,
default=MAX_SEQ_LENGTH,
help="Maximum sequence length to use for computing the embeddings. The more the higher computational costs.",
)
parser.add_argument("--output_path", type=str, default=OUTPUT_PATH, help="Path to serialize the parquet file.")
args = parser.parse_args()
run(args)
|
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='RandomResize', scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# TODO: Use RepeatDataset to speed up training
# training schedule for 3x
train_cfg = dict(max_epochs=36, val_interval=3)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[24, 33],
gamma=0.1)
]
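# Net effect of the schedule above: the learning rate warms up linearly from
# lr * 0.001 to lr over the first 500 iterations, then drops by 10x after
# epoch 24 and again after epoch 33 of the 36-epoch run.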
|
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='RandomResize', img_scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# TODO: Use RepeatDataset to speed up training
# training schedule for 3x
train_cfg = dict(max_epochs=36, val_interval=3)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[24, 33],
gamma=0.1)
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize as deserialize
from keras.src.dtype_policies import get as get
from keras.src.dtype_policies import serialize as serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import (
FloatDTypePolicy as FloatDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
QuantizedDTypePolicy as QuantizedDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
QuantizedFloat8DTypePolicy as QuantizedFloat8DTypePolicy,
)
from keras.src.dtype_policies.dtype_policy_map import (
DTypePolicyMap as DTypePolicyMap,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize
from keras.src.dtype_policies import get
from keras.src.dtype_policies import serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
|
import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
ProcessExecutor,
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
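# The parametrization above exercises three override layers for gateway
# arguments: class defaults ('arg3'), values from the dummy gateway YAML
# ('arg1', 'arg2'), and explicit `uses_with` overrides, which take precedence
# over both.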
|
import json
import multiprocessing
import os
import time
import pytest
from docarray import DocumentArray
from jina import Executor, requests
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
class ProcessExecutor(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processed'] = True
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
from typing import List
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import TreebankWordDetokenizer, word_tokenize
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
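# Hedged usage sketch (assumes the NLTK `punkt` tokenizer data is available
# locally; run `nltk.download("punkt")` once beforehand if it is not):
if __name__ == "__main__":
    sentences = ["The quick brown fox jumps over the lazy dog."]
    dataset = DenoisingAutoEncoderDataset(sentences)
    example = dataset[0]
    # texts[0] is the noised sentence (roughly 60% of words deleted),
    # texts[1] is the untouched original
    print(example.texts)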
|
from torch.utils.data import Dataset
from typing import List
from ..readers.InputExample import InputExample
import numpy as np
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize, TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
|
import asyncio
from typing import AsyncIterator, Iterator, Optional, Union
from jina.helper import get_or_reuse_loop
class RequestsCounter:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
        def count_increment(i: int, rc: RequestsCounter):
i += 1
rc.count += 1
c_int = 0
c_rc = RequestsCounter()
count_increment(c_int, c_rc)
assert c_int == 0
assert c_rc.count == 1
"""
count = 0
class AsyncRequestsIterator:
"""Iterator to allow async iteration of blocking/non-blocking iterator from the Client"""
def __init__(
self,
iterator: Union[Iterator, AsyncIterator],
request_counter: Optional[RequestsCounter] = None,
prefetch: int = 0,
) -> None:
"""Async request iterator
:param iterator: request iterator
:param request_counter: counter of the numbers of request being handled at a given moment
:param prefetch: The max amount of requests to be handled at a given moment (0 disables feature)
"""
self.iterator = iterator
self._request_counter = request_counter
self._prefetch = prefetch
def iterator__next__(self):
"""
        Executed inside a `ThreadPoolExecutor` via `loop.run_in_executor` to avoid the following exception.
"StopIteration interacts badly with generators and cannot be raised into a Future"
:return: next request or None
"""
try:
return self.iterator.__next__()
except StopIteration:
return None
def __aiter__(self):
return self
async def __anext__(self):
if isinstance(self.iterator, Iterator):
"""
An `Iterator` indicates "blocking" code, which might block all tasks in the event loop.
Hence we iterate in the default executor provided by asyncio.
"""
request = await get_or_reuse_loop().run_in_executor(
None, self.iterator__next__
)
"""
`iterator.__next__` can be executed directly and that'd raise `StopIteration` in the executor,
which raises the following exception while chaining states in futures.
"StopIteration interacts badly with generators and cannot be raised into a Future"
To avoid that, we handle the raise by a `return None`
"""
if request is None:
raise StopAsyncIteration
elif isinstance(self.iterator, AsyncIterator):
# we assume that `AsyncIterator` doesn't block the event loop
request = await self.iterator.__anext__()
if self._prefetch > 0:
while self._request_counter.count >= self._prefetch:
await asyncio.sleep(0)
return request
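# Hedged usage sketch: wrapping a plain blocking iterator and consuming it
# asynchronously; payloads are plain ints and prefetch stays at its default of 0,
# so no RequestsCounter is needed.
if __name__ == "__main__":
    async def _demo():
        async for request in AsyncRequestsIterator(iter(range(3))):
            print(request)
    get_or_reuse_loop().run_until_complete(_demo())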
|
import asyncio
from typing import AsyncIterator, Iterator, Optional, Union
from jina.helper import get_or_reuse_loop
class RequestsCounter:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
        def count_increment(i: int, rc: RequestsCounter):
i += 1
rc.count += 1
c_int = 0
c_rc = RequestsCounter()
count_increment(c_int, c_rc)
assert c_int == 0
assert c_rc.count == 1
"""
count = 0
class AsyncRequestsIterator:
"""Iterator to allow async iteration of blocking/non-blocking iterator from the Client"""
def __init__(
self,
iterator: Union[Iterator, AsyncIterator],
request_counter: Optional[RequestsCounter] = None,
prefetch: int = 0,
) -> None:
"""Async request iterator
:param iterator: request iterator
:param request_counter: counter of the numbers of request being handled at a given moment
:param prefetch: The max amount of requests to be handled at a given moment (0 disables feature)
"""
self.iterator = iterator
self._request_counter = request_counter
self._prefetch = prefetch
def iterator__next__(self):
"""
        Executed inside a `ThreadPoolExecutor` via `loop.run_in_executor` to avoid the following exception.
"StopIteration interacts badly with generators and cannot be raised into a Future"
:return: next request or None
"""
try:
return self.iterator.__next__()
except StopIteration:
return None
def __aiter__(self):
return self
async def __anext__(self):
if self._prefetch > 0:
while self._request_counter.count >= self._prefetch:
await asyncio.sleep(0)
if isinstance(self.iterator, Iterator):
"""
An `Iterator` indicates "blocking" code, which might block all tasks in the event loop.
Hence we iterate in the default executor provided by asyncio.
"""
request = await get_or_reuse_loop().run_in_executor(
None, self.iterator__next__
)
"""
`iterator.__next__` can be executed directly and that'd raise `StopIteration` in the executor,
which raises the following exception while chaining states in futures.
"StopIteration interacts badly with generators and cannot be raised into a Future"
To avoid that, we handle the raise by a `return None`
"""
if request is None:
raise StopAsyncIteration
elif isinstance(self.iterator, AsyncIterator):
# we assume that `AsyncIterator` doesn't block the event loop
request = await self.iterator.__anext__()
return request
|
# Copyright (c) OpenMMLab. All rights reserved.
from .class_aware_sampler import ClassAwareSampler
from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler
__all__ = [
'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler',
'InfiniteGroupBatchSampler', 'InfiniteBatchSampler', 'ClassAwareSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler
__all__ = [
'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler',
'InfiniteGroupBatchSampler', 'InfiniteBatchSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
from .split_batch import split_batch
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
from .split_batch import split_batch
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp',
'build_dp', 'get_device'
]
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Some models don't automatically normalize the embeddings, in which case you should normalize the embeddings:
# corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Some models don't automatically normalize the embeddings, in which case you should normalize the embeddings:
# corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
import re
import unicodedata
import regex
# non-ASCII letters that are not separated by "NFKD" normalization
ADDITIONAL_DIACRITICS = {
"œ": "oe",
"Œ": "OE",
"ø": "o",
"Ø": "O",
"æ": "ae",
"Æ": "AE",
"ß": "ss",
"ẞ": "SS",
"đ": "d",
"Đ": "D",
"ð": "d",
"Ð": "D",
"þ": "th",
"Þ": "th",
"ł": "l",
"Ł": "L",
}
def remove_symbols_and_diacritics(s: str, keep=""):
"""
Replace any other markers, symbols, and punctuations with a space,
and drop any diacritics (category 'Mn' and some manual mappings)
"""
return "".join(
c
if c in keep
else ADDITIONAL_DIACRITICS[c]
if c in ADDITIONAL_DIACRITICS
else ""
if unicodedata.category(c) == "Mn"
else " "
if unicodedata.category(c)[0] in "MSP"
else c
for c in unicodedata.normalize("NFKD", s)
)
def remove_symbols(s: str):
"""
Replace any other markers, symbols, punctuations with a space, keeping diacritics
"""
return "".join(
" " if unicodedata.category(c)[0] in "MSP" else c
for c in unicodedata.normalize("NFKC", s)
)
class BasicTextNormalizer:
def __init__(self, remove_diacritics: bool = False, split_letters: bool = False):
self.clean = (
remove_symbols_and_diacritics if remove_diacritics else remove_symbols
)
self.split_letters = split_letters
def __call__(self, s: str):
s = s.lower()
s = re.sub(r"[<\[][^>\]]*[>\]]", "", s) # remove words between brackets
s = re.sub(r"\(([^)]+?)\)", "", s) # remove words between parenthesis
s = self.clean(s).lower()
if self.split_letters:
s = " ".join(regex.findall(r"\X", s, regex.U))
s = re.sub(
r"\s+", " ", s
) # replace any successive whitespace characters with a space
return s
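# Hedged usage sketch: the default normalizer lowercases, strips bracketed and
# parenthesised spans, replaces symbols/punctuation with spaces, and collapses
# whitespace (diacritics are kept unless remove_diacritics=True).
if __name__ == "__main__":
    normalizer = BasicTextNormalizer()
    print(normalizer("Hello, [noise] (laughs) Café!"))  # roughly: "hello café"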
|
import re
import unicodedata
import regex
# non-ASCII letters that are not separated by "NFKD" normalization
ADDITIONAL_DIACRITICS = {
"œ": "oe",
"Œ": "OE",
"ø": "o",
"Ø": "O",
"æ": "ae",
"Æ": "AE",
"ß": "ss",
"ẞ": "SS",
"đ": "d",
"Đ": "D",
"ð": "d",
"Ð": "D",
"þ": "th",
"Þ": "th",
"ł": "l",
"Ł": "L",
}
def remove_symbols_and_diacritics(s: str, keep=""):
"""
Replace any other markers, symbols, and punctuations with a space,
and drop any diacritics (category 'Mn' and some manual mappings)
"""
return "".join(
c
if c in keep
else ADDITIONAL_DIACRITICS[c]
if c in ADDITIONAL_DIACRITICS
else ""
if unicodedata.category(c) == "Mn"
else " "
if unicodedata.category(c)[0] in "MSP"
else c
for c in unicodedata.normalize("NFKD", s)
)
def remove_symbols(s: str):
"""
Replace any other markers, symbols, punctuations with a space, keeping diacritics
"""
return "".join(
" " if unicodedata.category(c)[0] in "MSP" else c for c in unicodedata.normalize("NFKC", s)
)
class BasicTextNormalizer:
def __init__(self, remove_diacritics: bool = False, split_letters: bool = False):
self.clean = remove_symbols_and_diacritics if remove_diacritics else remove_symbols
self.split_letters = split_letters
def __call__(self, s: str):
s = s.lower()
s = re.sub(r"[<\[][^>\]]*[>\]]", "", s) # remove words between brackets
s = re.sub(r"\(([^)]+?)\)", "", s) # remove words between parenthesis
s = self.clean(s).lower()
if self.split_letters:
s = " ".join(regex.findall(r"\X", s, regex.U))
s = re.sub(r"\s+", " ", s) # replace any successive whitespace characters with a space
return s
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which
    are optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which
    are optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDocument):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
_TASKS_TO_MIXTURE = {
"sep_clean": "mix_clean",
"enh_single": "mix_single",
"enh_both": "mix_both",
"sep_noisy": "mix_both",
}
class LibriMix(Dataset):
r"""*LibriMix* :cite:`cosentino2020librimix` dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``"train-360"``, ``"train-100"``,
``"dev"``, and ``"test"``] (Default: ``"train-360"``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): Sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any audio file has a different
            sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): The task of LibriMix.
Options: [``"enh_single"``, ``"enh_both"``, ``"sep_clean"``, ``"sep_noisy"``]
(Default: ``"sep_clean"``)
mode (str, optional): The mode when creating the mixture. If set to ``"min"``, the lengths of mixture
and sources are the minimum length of all sources. If set to ``"max"``, the lengths of mixture and
sources are zero padded to the maximum length of all sources.
Options: [``"min"``, ``"max"``]
(Default: ``"min"``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
mode: str = "min",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if mode not in ["max", "min"]:
            raise ValueError(f'Expect ``mode`` to be one of ["min", "max"]. Found {mode}.')
if sample_rate == 8000:
self.root = self.root / "wav8k" / mode / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k" / mode / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / _TASKS_TO_MIXTURE[task]).resolve()
if task == "enh_both":
self.src_dirs = [(self.root / "mix_clean")]
else:
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*.wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
int:
Sample rate
Tensor:
Mixture waveform
list of Tensors:
List of source waveforms
"""
return self._load_sample(self.files[key])
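# --- Usage sketch (added for illustration; not part of the original file). ---
# The root path below is a placeholder and must point to a locally generated
# LibriMix directory (see the note in the class docstring).
if __name__ == "__main__":
    dataset = LibriMix("/path/to/librimix_root", subset="dev", num_speakers=2, task="sep_clean")
    sample_rate, mixture, sources = dataset[0]
    print(sample_rate, mixture.shape, [src.shape for src in sources])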
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""*LibriMix* :cite:`cosentino2020librimix` dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``"train-360"``, ``"train-100"``,
``"dev"``, and ``"test"``] (Default: ``"train-360"``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): Sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any audio file has a different
            sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``"enh_single"``, ``"enh_both"``, ``"sep_clean"``, ``"sep_noisy"``]
(Default: ``"sep_clean"``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
int:
Sample rate
Tensor:
Mixture waveform
list of Tensors:
List of source waveforms
"""
return self._load_sample(self.files[key])
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.prototype.tv_tensors import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import (
_FillType,
_get_fill,
_setup_fill_arg,
_setup_size,
get_bounding_boxes,
has_any,
is_pure_tensor,
query_size,
)
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
tv_tensors.Image,
is_pure_tensor,
tv_tensors.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, tv_tensors.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, tv_tensors.Mask)):
inpt = tv_tensors.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, tv_tensors.BoundingBoxes):
inpt = tv_tensors.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
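# --- Usage sketch (added for illustration; not part of the original file). ---
# A pure image tensor passes _check_inputs without labels or boxes; for a
# 300x400 input the transform crops to the requested 224x224 window.
if __name__ == "__main__":
    transform = FixedSizeCrop(size=(224, 224))
    out = transform(torch.rand(3, 300, 400))
    print(out.shape)  # torch.Size([3, 224, 224]) for this input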
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import (
_FillType,
_get_fill,
_setup_fill_arg,
_setup_size,
get_bounding_boxes,
has_any,
is_pure_tensor,
query_size,
)
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_pure_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = datapoints.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type={{_base_.dataset_type}},
data_root={{_base_.data_root}},
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type={{_base_.dataset_type}},
data_root={{_base_.data_root}},
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.transform import AutoStructify
import os
from sphinx.domains import Domain
import datetime
# -- Project information -----------------------------------------------------
project = "Sentence-Transformers"
copyright = str(datetime.datetime.now().year) + ", Nils Reimers"
author = "Nils Reimers"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "recommonmark", "sphinx_markdown_tables"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "nr_examples"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = ["_themes"]
html_theme_options = {"logo_only": True, "canonical_url": "https://www.sbert.net"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
html_js_files = [
"js/custom.js",
]
html_show_sourcelink = False
html_context = {
"display_github": True,
"github_user": "UKPLab",
"github_repo": "sentence-transformers",
"github_version": "master/",
}
html_logo = "img/logo.png"
html_favicon = "img/favicon.ico"
autoclass_content = "both"
class GithubURLDomain(Domain):
"""
Resolve .py links to their respective Github URL
"""
name = "githuburl"
ROOT = "https://github.com/UKPLab/sentence-transformers/tree/master"
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
if (target.endswith(".py") or target.endswith(".ipynb")) and not target.startswith("http"):
from_folder = os.path.dirname(fromdocname)
contnode["refuri"] = "/".join([self.ROOT, from_folder, target])
return [("githuburl:any", contnode)]
return []
def setup(app):
app.add_domain(GithubURLDomain)
app.add_config_value(
"recommonmark_config",
{
#'url_resolver': lambda url: github_doc_root + url,
"auto_toc_tree_section": "Contents",
},
True,
)
app.add_transform(AutoStructify)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# sphinx-build -c . -a -E .. _build
from recommonmark.transform import AutoStructify
import os
from sphinx.domains import Domain
import datetime
# -- Project information -----------------------------------------------------
project = "Sentence-Transformers"
copyright = str(datetime.datetime.now().year) + ", Nils Reimers"
author = "Nils Reimers"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "recommonmark", "sphinx_markdown_tables"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "nr_examples"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = ["_themes"]
html_theme_options = {"logo_only": True, "canonical_url": "https://www.sbert.net"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
html_js_files = [
"js/custom.js",
]
html_show_sourcelink = False
html_context = {
"display_github": True,
"github_user": "UKPLab",
"github_repo": "sentence-transformers",
"github_version": "master/",
}
html_logo = "img/logo.png"
html_favicon = "img/favicon.ico"
autoclass_content = "both"
class GithubURLDomain(Domain):
"""
Resolve .py links to their respective Github URL
"""
name = "githuburl"
ROOT = "https://github.com/UKPLab/sentence-transformers/tree/master"
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
if (target.endswith(".py") or target.endswith(".ipynb")) and not target.startswith("http"):
from_folder = os.path.dirname(fromdocname)
contnode["refuri"] = "/".join([self.ROOT, from_folder, target])
return [("githuburl:any", contnode)]
return []
def setup(app):
app.add_domain(GithubURLDomain)
app.add_config_value(
"recommonmark_config",
{
#'url_resolver': lambda url: github_doc_root + url,
"auto_toc_tree_section": "Contents",
},
True,
)
app.add_transform(AutoStructify)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15)))
# runtime settings
max_epochs = 15
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15)))
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=15)
|
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class SequentialRetriever(BaseRetriever):
"""Test util that returns a sequence of documents"""
sequential_responses: list[list[Document]]
response_index: int = 0
def _get_relevant_documents( # type: ignore[override]
self,
query: str,
) -> list[Document]:
if self.response_index >= len(self.sequential_responses):
return []
else:
self.response_index += 1
return self.sequential_responses[self.response_index - 1]
async def _aget_relevant_documents( # type: ignore[override]
self,
query: str,
) -> list[Document]:
return self._get_relevant_documents(query)
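# --- Usage sketch (added for illustration; not part of the original test util). ---
# Each call returns the next configured response; once the responses are
# exhausted, an empty list is returned.
if __name__ == "__main__":
    retriever = SequentialRetriever(
        sequential_responses=[
            [Document(page_content="first")],
            [Document(page_content="second")],
        ]
    )
    print(retriever._get_relevant_documents("query"))  # first response
    print(retriever._get_relevant_documents("query"))  # second response
    print(retriever._get_relevant_documents("query"))  # [] once exhausted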
|
from langchain_core.retrievers import BaseRetriever, Document
class SequentialRetriever(BaseRetriever):
"""Test util that returns a sequence of documents"""
sequential_responses: list[list[Document]]
response_index: int = 0
def _get_relevant_documents( # type: ignore[override]
self,
query: str,
) -> list[Document]:
if self.response_index >= len(self.sequential_responses):
return []
else:
self.response_index += 1
return self.sequential_responses[self.response_index - 1]
async def _aget_relevant_documents( # type: ignore[override]
self,
query: str,
) -> list[Document]:
return self._get_relevant_documents(query)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
        featmap_strides (List[int]): Strides of input feature maps.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
init_cfg=None):
super(BaseRoIExtractor, self).__init__(init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self):
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg, featmap_strides):
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (dict): Dictionary to construct and config RoI layer
operation. Options are modules under ``mmcv/ops`` such as
``RoIAlign``.
            featmap_strides (List[int]): The stride of input feature map w.r.t.
                the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
nn.ModuleList: The RoI extractor modules for each level feature
map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois, scale_factor):
"""Scale RoI coordinates by scale factor.
Args:
rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
torch.Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self, feats, rois, roi_scale_factor=None):
pass
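# Worked example for ``roi_rescale`` (added for illustration): a RoI is
# ``[batch_idx, x1, y1, x2, y2]``, so for ``[0, 10, 10, 30, 30]`` with
# ``scale_factor=2`` the centre is (20, 20), the 20x20 box grows to 40x40,
# and the method returns ``[0, 0, 0, 40, 40]``.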
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv import ops
from mmcv.runner import BaseModule
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
        featmap_strides (List[int]): Strides of input feature maps.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
init_cfg=None):
super(BaseRoIExtractor, self).__init__(init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self):
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg, featmap_strides):
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (dict): Dictionary to construct and config RoI layer
operation. Options are modules under ``mmcv/ops`` such as
``RoIAlign``.
            featmap_strides (List[int]): The stride of input feature map w.r.t.
                the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
nn.ModuleList: The RoI extractor modules for each level feature
map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois, scale_factor):
"""Scale RoI coordinates by scale factor.
Args:
rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
torch.Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self, feats, rois, roi_scale_factor=None):
pass
|
_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
checkpoint=pretrained)))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
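# Illustrative note (added; not part of the original config): with
# depths = [2, 2, 18, 2] the comprehensions above expand to keys such as
# 'backbone.stages.0.blocks.0.norm', 'backbone.stages.2.blocks.17.norm' and
# 'backbone.stages.1.downsample.norm', all mapped to backbone_norm_multi
# (lr_mult=0.1, decay_mult=0.0).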
|
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
checkpoint=pretrained)))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._encoded import EncodedData, EncodedImage
from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor
from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
from ._label import Label, OneHotLabel
from ._mask import Mask
from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._encoded import EncodedData, EncodedImage
from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor
from ._image import (
ColorSpace,
Image,
ImageType,
ImageTypeJIT,
LegacyImageType,
LegacyImageTypeJIT,
TensorImageType,
TensorImageTypeJIT,
)
from ._label import Label, OneHotLabel
from ._mask import Mask
from ._video import (
LegacyVideoType,
LegacyVideoTypeJIT,
TensorVideoType,
TensorVideoTypeJIT,
Video,
VideoType,
VideoTypeJIT,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_resnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.inception_resnet_v2 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_resnet_v2 import InceptionResNetV2
from keras.src.applications.inception_resnet_v2 import decode_predictions
from keras.src.applications.inception_resnet_v2 import preprocess_input
|
"""Test the loading function for evaluators."""
from typing import List
import pytest
from langchain.evaluation.loading import EvaluatorType, load_evaluators
from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
from langchain_core.embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("evaluator_type", EvaluatorType)
def test_load_evaluators(evaluator_type: EvaluatorType) -> None:
"""Test loading evaluators."""
fake_llm = FakeChatModel()
embeddings = FakeEmbeddings(size=32)
load_evaluators([evaluator_type], llm=fake_llm, embeddings=embeddings)
# Test as string
load_evaluators(
[evaluator_type.value], # type: ignore[list-item]
llm=fake_llm,
embeddings=embeddings,
)
@pytest.mark.parametrize(
"evaluator_types",
[
[EvaluatorType.LABELED_CRITERIA],
[EvaluatorType.LABELED_PAIRWISE_STRING],
[EvaluatorType.LABELED_SCORE_STRING],
[EvaluatorType.QA],
[EvaluatorType.CONTEXT_QA],
[EvaluatorType.COT_QA],
[EvaluatorType.COT_QA, EvaluatorType.LABELED_CRITERIA],
[
EvaluatorType.COT_QA,
EvaluatorType.LABELED_CRITERIA,
EvaluatorType.LABELED_PAIRWISE_STRING,
],
[EvaluatorType.JSON_EQUALITY],
[EvaluatorType.EXACT_MATCH, EvaluatorType.REGEX_MATCH],
],
)
def test_eval_chain_requires_references(evaluator_types: List[EvaluatorType]) -> None:
"""Test loading evaluators."""
fake_llm = FakeLLM(
queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
)
evaluators = load_evaluators(
evaluator_types,
llm=fake_llm,
)
for evaluator in evaluators:
if not isinstance(evaluator, (StringEvaluator, PairwiseStringEvaluator)):
raise ValueError("Evaluator is not a [pairwise]string evaluator")
assert evaluator.requires_reference
|
"""Test the loading function for evaluators."""
from typing import List
import pytest
from langchain.evaluation.loading import EvaluatorType, load_evaluators
from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
from langchain_core.embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("evaluator_type", EvaluatorType)
def test_load_evaluators(evaluator_type: EvaluatorType) -> None:
"""Test loading evaluators."""
fake_llm = FakeChatModel()
embeddings = FakeEmbeddings(size=32)
load_evaluators([evaluator_type], llm=fake_llm, embeddings=embeddings)
# Test as string
load_evaluators(
[evaluator_type.value], # type: ignore
llm=fake_llm,
embeddings=embeddings,
)
@pytest.mark.parametrize(
"evaluator_types",
[
[EvaluatorType.LABELED_CRITERIA],
[EvaluatorType.LABELED_PAIRWISE_STRING],
[EvaluatorType.LABELED_SCORE_STRING],
[EvaluatorType.QA],
[EvaluatorType.CONTEXT_QA],
[EvaluatorType.COT_QA],
[EvaluatorType.COT_QA, EvaluatorType.LABELED_CRITERIA],
[
EvaluatorType.COT_QA,
EvaluatorType.LABELED_CRITERIA,
EvaluatorType.LABELED_PAIRWISE_STRING,
],
[EvaluatorType.JSON_EQUALITY],
[EvaluatorType.EXACT_MATCH, EvaluatorType.REGEX_MATCH],
],
)
def test_eval_chain_requires_references(evaluator_types: List[EvaluatorType]) -> None:
"""Test loading evaluators."""
fake_llm = FakeLLM(
queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
)
evaluators = load_evaluators(
evaluator_types,
llm=fake_llm,
)
for evaluator in evaluators:
if not isinstance(evaluator, (StringEvaluator, PairwiseStringEvaluator)):
raise ValueError("Evaluator is not a [pairwise]string evaluator")
assert evaluator.requires_reference
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import uuid
import pytest
@pytest.fixture(scope='session', autouse=True)
def start_redis():
os.system(
'docker run --name redis-stack-server -p 6379:6379 -d redis/redis-stack-server:7.2.0-RC2'
)
time.sleep(1)
yield
os.system('docker rm -f redis-stack-server')
@pytest.fixture(scope='function')
def tmp_index_name():
return uuid.uuid4().hex
|
import os
import time
import uuid
import pytest
@pytest.fixture(scope='session', autouse=True)
def start_redis():
os.system(
'docker run --name redis-stack-server -p 6379:6379 -d redis/redis-stack-server:7.2.0-RC2'
)
time.sleep(1)
yield
os.system('docker rm -f redis-stack-server')
@pytest.fixture(scope='function')
def tmp_index_name():
return uuid.uuid4().hex
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config, load_dataset_builder
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.en", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.fr", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.frr", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.it", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.simple", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wiki40b", "config_name": "en", "revision": "7b21a2e64b90323b2d3d1b81aa349bb4bc76d9bf"},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.nq.compressed",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.nq.no_index",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.multiset.no_index",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{"dataset": "natural_questions", "config_name": "default", "revision": "19ba7767b174ad046a84f46af056517a3910ee57"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True, with_revision=True):
columns = ["dataset"]
if with_config:
columns.append("config_name")
if with_revision:
columns.append("revision")
dataset_list = [{col: dataset[col] for col in columns} for dataset in DATASETS_ON_HF_GCP]
def get_testcase_name(dataset):
testcase_name = dataset["dataset"]
if with_config:
testcase_name += "/" + dataset["config_name"]
if with_revision:
testcase_name += "@" + dataset["revision"]
return testcase_name
dataset_list = [{"testcase_name": get_testcase_name(dataset), **dataset} for dataset in dataset_list]
return dataset_list
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True, with_revision=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
revision = None
def test_dataset_info_available(self, dataset, config_name, revision):
with TemporaryDirectory() as tmp_dir:
builder = load_dataset_builder(
dataset,
config_name,
revision=revision,
cache_dir=tmp_dir,
)
dataset_info_url = "/".join(
[
HF_GCP_BASE_URL,
builder._relative_data_dir(with_hash=False).replace(os.sep, "/"),
config.DATASET_INFO_FILENAME,
]
)
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
builder = load_dataset_builder("wikipedia", "20220301.frr", cache_dir=tmp_dir)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder._download_and_prepare = None
builder.download_and_prepare(try_from_hf_gcs=True)
ds = builder.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
builder = load_dataset_builder(
"wikipedia", "20220301.frr", revision="4d013bdd32c475c8536aae00a56efc774f061649", cache_dir=tmp_path
)
ds = builder.as_streaming_dataset()
assert ds
assert isinstance(ds, IterableDatasetDict)
assert "train" in ds
assert isinstance(ds["train"], IterableDataset)
assert next(iter(ds["train"]))
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = "/".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
config.DATASET_INFO_FILENAME,
]
)
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name="20220301.frr",
hash=dataset_module.hash,
)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_path,
config_name="20220301.frr",
hash=dataset_module.hash,
)
ds = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(ds, IterableDatasetDict)
assert "train" in ds
assert isinstance(ds["train"], IterableDataset)
assert next(iter(ds["train"]))
|
import json
import logging
from enum import Enum
from typing import Any
from requests.exceptions import HTTPError, RequestException
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
logger = logging.getLogger(name=__name__)
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class SendWebRequestBlock(Block):
class Input(BlockSchema):
url: str = SchemaField(
description="The URL to send the request to",
placeholder="https://api.example.com",
)
method: HttpMethod = SchemaField(
description="The HTTP method to use for the request",
default=HttpMethod.POST,
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default_factory=dict,
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
default=True,
)
body: Any = SchemaField(
description="The body of the request",
default=None,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="Errors on 4xx status codes")
server_error: object = SchemaField(description="Errors on 5xx status codes")
error: str = SchemaField(description="Errors for all other exceptions")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
body = input_data.body
if input_data.json_format:
if isinstance(body, str):
try:
# Try to parse as JSON first
body = json.loads(body)
except json.JSONDecodeError:
# If it's not valid JSON and just plain text,
# we should send it as plain text instead
input_data.json_format = False
try:
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=body if input_data.json_format else None,
data=body if not input_data.json_format else None,
)
result = response.json() if input_data.json_format else response.text
yield "response", result
except HTTPError as e:
# Handle error responses
try:
result = e.response.json() if input_data.json_format else str(e)
except json.JSONDecodeError:
result = str(e)
if 400 <= e.response.status_code < 500:
yield "client_error", result
elif 500 <= e.response.status_code < 600:
yield "server_error", result
else:
error_msg = (
"Unexpected status code "
f"{e.response.status_code} '{e.response.reason}'"
)
logger.warning(error_msg)
yield "error", error_msg
except RequestException as e:
# Handle other request-related exceptions
yield "error", str(e)
except Exception as e:
# Catch any other unexpected exceptions
yield "error", str(e)
|
import json
import logging
from enum import Enum
from typing import Any
from requests.exceptions import HTTPError, RequestException
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
logger = logging.getLogger(name=__name__)
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
OPTIONS = "OPTIONS"
HEAD = "HEAD"
class SendWebRequestBlock(Block):
class Input(BlockSchema):
url: str = SchemaField(
description="The URL to send the request to",
placeholder="https://api.example.com",
)
method: HttpMethod = SchemaField(
description="The HTTP method to use for the request",
default=HttpMethod.POST,
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default={},
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
default=True,
)
body: Any = SchemaField(
description="The body of the request",
default=None,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="Errors on 4xx status codes")
server_error: object = SchemaField(description="Errors on 5xx status codes")
error: str = SchemaField(description="Errors for all other exceptions")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
body = input_data.body
if input_data.json_format:
if isinstance(body, str):
try:
# Try to parse as JSON first
body = json.loads(body)
except json.JSONDecodeError:
# If it's not valid JSON and just plain text,
# we should send it as plain text instead
input_data.json_format = False
try:
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=body if input_data.json_format else None,
data=body if not input_data.json_format else None,
)
result = response.json() if input_data.json_format else response.text
yield "response", result
except HTTPError as e:
# Handle error responses
try:
result = e.response.json() if input_data.json_format else str(e)
except json.JSONDecodeError:
result = str(e)
if 400 <= e.response.status_code < 500:
yield "client_error", result
elif 500 <= e.response.status_code < 600:
yield "server_error", result
else:
error_msg = (
"Unexpected status code "
f"{e.response.status_code} '{e.response.reason}'"
)
logger.warning(error_msg)
yield "error", error_msg
except RequestException as e:
# Handle other request-related exceptions
yield "error", str(e)
except Exception as e:
# Catch any other unexpected exceptions
yield "error", str(e)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import (
MotionAdapter,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
)
enable_full_determinism()
class MotionAdapterSingleFileTests(unittest.TestCase):
model_class = MotionAdapter
def test_single_file_components_version_v1_5(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_v1_5_2(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-2"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_v1_5_3(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-3"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_sdxl_beta(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-sdxl-beta"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import (
MotionAdapter,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
)
enable_full_determinism()
class MotionAdapterSingleFileTests(unittest.TestCase):
model_class = MotionAdapter
def test_single_file_components_version_v1_5(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_v1_5_2(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-2"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_v1_5_3(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-3"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
def test_single_file_components_version_sdxl_beta(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-sdxl-beta"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
Load the data from the url into an AudioNdArray.
:return: tuple of an AudioNdArray representing the audio file content, and an integer representing the frame rate.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
import numpy as np
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor, doc.frame_rate = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
Load the data from the url into an AudioNdArray.
:return: tuple of an AudioNdArray representing the audio file content, and an integer representing the frame rate.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
import numpy as np
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor, doc.frame_rate = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(GridRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(GridRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
import warnings
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.typing.bytes.video_bytes import VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
f'Video URL must have one of the following extensions:'
f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a named Tuple of VideoNdArray, AudioNdArray and
NdArray.
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: AudioNdArray representing the audio content, VideoNdArray representing
the images of the video, NdArray of the key frame indices.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
You can load only the key frames (or video, audio respectively):
.. code-block:: python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
"""
from docarray.typing.bytes.video_bytes import VideoBytes
buffer = VideoBytes(self.load_bytes(**kwargs))
return buffer.load()
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = True if self.startswith('http') else False
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.typing.bytes.video_bytes import VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
f'Video URL must have one of the following extensions:'
f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a named Tuple of VideoNdArray, AudioNdArray and
NdArray.
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: AudioNdArray representing the audio content, VideoNdArray representing
the images of the video, NdArray of the key frame indices.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
You can load only the key frames (or video, audio respectively):
.. code-block:: python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
"""
from docarray.typing.bytes.video_bytes import VideoBytes
buffer = VideoBytes(self.load_bytes(**kwargs))
return buffer.load()
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = True if self.startswith('http') else False
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import GridHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.testing import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results)
class TestGridHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_grid_head_loss(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
grid_head = GridHead()
grid_head.to(device=device)
s = 256
image_shapes = [(3, s, s)]
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device=device))
proposals_list[i] = proposals_list[i].to(device=device)
packed_inputs[i]['data_sample'] = \
packed_inputs[i]['data_sample'].to(device=device)
train_cfg = ConfigDict(dict(pos_radius=1))
# prepare ground truth
data_samples = [inputs['data_sample'] for inputs in packed_inputs]
(batch_gt_instances, batch_gt_instances_ignore,
_) = unpack_gt_instances(data_samples)
sampling_results = demo_mm_sampling_results(
proposals_list=proposals_list,
batch_gt_instances=batch_gt_instances,
batch_gt_instances_ignore=batch_gt_instances_ignore)
# prepare grid feats
pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results])
grid_feats = torch.rand((pos_bboxes.size(0), 256, 14, 14)).to(device)
sample_idx = torch.arange(0, pos_bboxes.size(0))
grid_pred = grid_head(grid_feats)
grid_head.loss(grid_pred, sample_idx, sampling_results, train_cfg)
@parameterized.expand(['cpu', 'cuda'])
def test_mask_iou_head_predict_by_feat(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
grid_head = GridHead()
grid_head.to(device=device)
s = 128
num_samples = 2
num_classes = 4
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
results = InstanceData(metainfo=img_metas)
results.bboxes = torch.rand((num_samples, 4)).to(device)
results.scores = torch.rand((num_samples, )).to(device)
results.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
grid_feats = torch.rand((num_samples, 256, 14, 14)).to(device)
grid_preds = grid_head(grid_feats)
grid_head.predict_by_feat(
grid_preds=grid_preds,
results_list=[results],
batch_img_metas=[img_metas])
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import GridHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.testing import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results)
class TestGridHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_grid_head_loss(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
grid_head = GridHead()
grid_head.to(device=device)
s = 256
image_shapes = [(3, s, s)]
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True)
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device=device))
proposals_list[i] = proposals_list[i].to(device=device)
train_cfg = ConfigDict(dict(pos_radius=1))
# prepare ground truth
data_samples = [inputs['data_sample'] for inputs in packed_inputs]
(batch_gt_instances, batch_gt_instances_ignore,
_) = unpack_gt_instances(data_samples)
sampling_results = demo_mm_sampling_results(
proposals_list=proposals_list,
batch_gt_instances=batch_gt_instances,
batch_gt_instances_ignore=batch_gt_instances_ignore)
# prepare grid feats
pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results])
grid_feats = torch.rand((pos_bboxes.size(0), 256, 14, 14)).to(device)
sample_idx = torch.arange(0, pos_bboxes.size(0))
grid_pred = grid_head(grid_feats)
grid_head.loss(grid_pred, sample_idx, sampling_results, train_cfg)
@parameterized.expand(['cpu', 'cuda'])
def test_mask_iou_head_predict_by_feat(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
grid_head = GridHead()
grid_head.to(device=device)
s = 128
num_samples = 2
num_classes = 4
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
results = InstanceData(metainfo=img_metas)
results.bboxes = torch.rand((num_samples, 4)).to(device)
results.scores = torch.rand((num_samples, )).to(device)
results.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
grid_feats = torch.rand((num_samples, 256, 14, 14)).to(device)
grid_preds = grid_head(grid_feats)
grid_head.predict_by_feat(
grid_preds=grid_preds,
results_list=[results],
batch_img_metas=[img_metas])
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_biasing, conformer_rnnt_biasing_base, conformer_rnnt_model
from .rnnt_decoder import Hypothesis, RNNTBeamSearchBiasing
from .squim import squim_objective_base, squim_objective_model, SquimObjective
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conformer_rnnt_biasing",
"conformer_rnnt_biasing_base",
"conv_tasnet_base",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"Hypothesis",
"RNNTBeamSearchBiasing",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
"squim_objective_base",
"squim_objective_model",
"SquimObjective",
]
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
from .squim import squim_objective_base, squim_objective_model, SquimObjective
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
"squim_objective_base",
"squim_objective_model",
"SquimObjective",
]
|
"""Tavily Search API toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.tavily_search.tool import (
TavilyAnswer,
TavilySearchResults,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TavilySearchResults": "langchain_community.tools.tavily_search.tool",
"TavilyAnswer": "langchain_community.tools.tavily_search.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TavilyAnswer",
"TavilySearchResults",
]
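# Hedged smoke test of the lazy-import shim above: resolving a deprecated name
# goes through __getattr__, which typically emits a deprecation warning and
# returns the class from langchain_community (assumes langchain_community is
# installed in the environment running this).
if __name__ == "__main__":
    tool_cls = __getattr__("TavilySearchResults")
    print(tool_cls.__module__)  # e.g. langchain_community.tools.tavily_search.tool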
|
"""Tavily Search API toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.tavily_search.tool import (
TavilyAnswer,
TavilySearchResults,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TavilySearchResults": "langchain_community.tools.tavily_search.tool",
"TavilyAnswer": "langchain_community.tools.tavily_search.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TavilySearchResults",
"TavilyAnswer",
]
|
"""Test chat model integration."""
import json
from collections.abc import Generator
from contextlib import contextmanager
from typing import Any
import pytest
from httpx import Client, Request, Response
from langchain_core.messages import ChatMessage
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call
class TestChatOllama(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3-groq-tool-use"}
def test__parse_arguments_from_tool_call() -> None:
raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}' # noqa: E501
raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"]
response = _parse_arguments_from_tool_call(raw_tool_calls[0])
assert response is not None
assert isinstance(response["arg_1"], str)
@contextmanager
def _mock_httpx_client_stream(
*args: Any, **kwargs: Any
) -> Generator[Response, Any, Any]:
yield Response(
status_code=200,
content='{"message": {"role": "assistant", "content": "The meaning ..."}}',
request=Request(method="POST", url="http://whocares:11434"),
)
def test_arbitrary_roles_accepted_in_chatmessages(
monkeypatch: pytest.MonkeyPatch,
) -> None:
monkeypatch.setattr(Client, "stream", _mock_httpx_client_stream)
llm = ChatOllama(
base_url="http://whocares:11434",
model="granite3.2",
verbose=True,
format=None,
)
messages = [
ChatMessage(
role="somerandomrole",
content="I'm ok with you adding any role message now!",
),
ChatMessage(role="control", content="thinking"),
ChatMessage(role="user", content="What is the meaning of life?"),
]
llm.invoke(messages)
|
"""Test chat model integration."""
import json
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call
class TestChatOllama(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3-groq-tool-use"}
def test__parse_arguments_from_tool_call() -> None:
raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}' # noqa: E501
raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"]
response = _parse_arguments_from_tool_call(raw_tool_calls[0])
assert response is not None
assert isinstance(response["arg_1"], str)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleFinanceQueryRun": "langchain_community.tools.google_finance.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleFinanceQueryRun",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleFinanceQueryRun": "langchain_community.tools.google_finance.tool"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleFinanceQueryRun",
]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_val.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v1_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_val.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v1_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
train_cfg = dict(val_interval=24)
|
from typing import TYPE_CHECKING, Union
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
import trimesh
class Mesh:
FILE_EXTENSIONS = [
'glb',
'obj',
'ply',
]
VERTICES = 'vertices'
FACES = 'faces'
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def _load_mesh(
self, force: str = None
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""Load a trimesh.Mesh or trimesh.Scene object from :attr:`.uri`.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh. For 'scene'
try to coerce everything into a scene.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
import trimesh
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self.uri, force=force)
return mesh
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
:param as_chunks: when multiple geometry stored in one mesh file,
then store each geometry into different :attr:`.chunks`
:return: itself after processed
"""
if as_chunks:
import trimesh
from docarray.document import Document
# try to coerce everything into a scene
scene = self._load_mesh(force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = self._load_mesh(force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
def load_uri_to_vertices_and_faces(self: 'T') -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.chunks` as vertices and faces
:return: itself after processed
"""
from docarray.document import Document
mesh = self._load_mesh(force='mesh')
vertices = mesh.vertices.view(np.ndarray)
faces = mesh.faces.view(np.ndarray)
self.chunks = [
Document(name=Mesh.VERTICES, tensor=vertices),
Document(name=Mesh.FACES, tensor=faces),
]
return self
def load_vertices_and_faces_to_point_cloud(self: 'T', samples: int) -> 'T':
"""Convert a 3d mesh of vertices and faces from :attr:`.chunks` into point cloud :attr:`.tensor`
:param samples: number of points to sample from the mesh
:return: itself after processed
"""
vertices = None
faces = None
for chunk in self.chunks:
if chunk.tags['name'] == Mesh.VERTICES:
vertices = chunk.tensor
if chunk.tags['name'] == Mesh.FACES:
faces = chunk.tensor
if vertices is not None and faces is not None:
import trimesh
mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
self.tensor = np.array(mesh.sample(samples))
else:
raise AttributeError(
'Point cloud tensor can not be set, since vertices and faces chunk tensor have not been set.'
)
return self
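# --- Hedged usage sketch for the mixin above (comments only) ---
# Assumes `trimesh` is installed, a local mesh file such as 'model.glb' exists,
# and that docarray's Document class mixes in MeshDataMixin:
#
#   from docarray.document import Document
#   doc = Document(uri='model.glb')
#   doc.load_uri_to_point_cloud_tensor(samples=1000)
#   doc.tensor.shape                       # -> (1000, 3)
#
#   doc = Document(uri='model.glb')
#   doc.load_uri_to_vertices_and_faces()
#   [c.tags['name'] for c in doc.chunks]   # -> ['vertices', 'faces']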
|
import warnings
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
:param as_chunks: when multiple geometry stored in one mesh file,
then store each geometry into different :attr:`.chunks`
:return: itself after processed
"""
import trimesh
import urllib.parse
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
if as_chunks:
from docarray.document import Document
# try to coerce everything into a scene
scene = loader(self.uri, force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = loader(self.uri, force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
def load_uri_to_vertices_and_faces(self: 'T') -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.chunks` as vertices and faces
:return: itself after processed
"""
import trimesh
import urllib.parse
from docarray.document import Document
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self.uri, force='mesh')
vertices = mesh.vertices.view(np.ndarray)
faces = mesh.faces.view(np.ndarray)
self.chunks = [
Document(name='vertices', tensor=vertices),
Document(name='faces', tensor=faces),
]
return self
def load_vertices_and_faces_to_point_cloud(self: 'T', samples: int) -> 'T':
"""Convert a 3d mesh of vertices and faces from :attr:`.chunks` into point cloud :attr:`.tensor`
:param samples: number of points to sample from the mesh
:return: itself after processed
"""
import trimesh
vertices = None
faces = None
for chunk in self.chunks:
if chunk.tags['name'] == 'vertices':
vertices = chunk.tensor
if chunk.tags['name'] == 'faces':
faces = chunk.tensor
if vertices is not None and faces is not None:
mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
self.tensor = np.array(mesh.sample(samples))
else:
raise AttributeError(
'Point cloud tensor can not be set, since vertices and faces chunk tensor have not been set.'
)
return self
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.2.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
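# Hedged examples of the parser above (illustrative version strings only):
#   parse_version_info('1.3.0')     -> (1, 3, 0)
#   parse_version_info('2.0.0rc1')  -> (2, 0, 0, 'rc1')
# so for __version__ = '3.2.0' the resulting version_info is (3, 2, 0).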
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.1.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
MultiImageMixDataset, RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
MultiImageMixDataset, RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset'
]
|
import functools
import numbers
from collections import defaultdict
from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union
from torchvision.prototype import datapoints
from torchvision.prototype.datapoints._datapoint import FillType, FillTypeJIT
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
def _setup_float_or_seq(arg: Union[float, Sequence[float]], name: str, req_size: int = 2) -> Sequence[float]:
if not isinstance(arg, (float, Sequence)):
raise TypeError(f"{name} should be float or a sequence of floats. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) != req_size:
raise ValueError(f"If {name} is a sequence its length should be one of {req_size}. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, float):
raise ValueError(f"{name} should be a sequence of floats. Got {type(element)}")
if isinstance(arg, float):
arg = [float(arg), float(arg)]
if isinstance(arg, (list, tuple)) and len(arg) == 1:
arg = [arg[0], arg[0]]
return arg
def _check_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> None:
if isinstance(fill, dict):
for key, value in fill.items():
# Check key for type
_check_fill_arg(value)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
_check_fill_arg(default_value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
def _convert_fill_arg(fill: datapoints.FillType) -> datapoints.FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill # type: ignore[return-value]
def _setup_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> Dict[Type, FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
sanitized_default = _convert_fill_arg(default_value)
fill.default_factory = functools.partial(_default_arg, sanitized_default)
return fill # type: ignore[return-value]
return _get_defaultdict(_convert_fill_arg(fill))
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if isinstance(padding, (tuple, list)) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
|
import functools
import numbers
from collections import defaultdict
from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union
from torchvision.prototype import datapoints
from torchvision.prototype.datapoints._datapoint import FillType, FillTypeJIT
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
def _setup_float_or_seq(arg: Union[float, Sequence[float]], name: str, req_size: int = 2) -> Sequence[float]:
if not isinstance(arg, (float, Sequence)):
raise TypeError(f"{name} should be float or a sequence of floats. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) != req_size:
raise ValueError(f"If {name} is a sequence its length should be one of {req_size}. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, float):
raise ValueError(f"{name} should be a sequence of floats. Got {type(element)}")
if isinstance(arg, float):
arg = [float(arg), float(arg)]
if isinstance(arg, (list, tuple)) and len(arg) == 1:
arg = [arg[0], arg[0]]
return arg
def _check_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> None:
if isinstance(fill, dict):
for key, value in fill.items():
# Check key for type
_check_fill_arg(value)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
_check_fill_arg(default_value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
def _convert_fill_arg(fill: datapoints.FillType) -> datapoints.FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
# This cast does Sequence -> List[float] to please mypy and torch.jit.script
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill
def _setup_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> Dict[Type, FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
sanitized_default = _convert_fill_arg(default_value)
fill.default_factory = functools.partial(_default_arg, sanitized_default)
return fill # type: ignore[return-value]
return _get_defaultdict(_convert_fill_arg(fill))
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if isinstance(padding, (tuple, list)) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import FCNMaskHead
class TestFCNMaskHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_get_seg_masks(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
num_classes = 6
mask_head = FCNMaskHead(
num_convs=1,
in_channels=1,
conv_out_channels=1,
num_classes=num_classes)
rcnn_test_cfg = ConfigDict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)
s = 128
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
result = InstanceData(metainfo=img_metas)
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# test with activate_map, `mask_pred` has been activated before
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
mask_pred = [m.sigmoid().detach() for m in mask_pred]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg,
activate_map=True)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# num_samples is 0
num_samples = 0
result = InstanceData(metainfo=img_metas)
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.zeros((num_samples, 4)).to(device)
result.labels = torch.zeros((num_samples, )).to(device)
result_list = mask_head.predict_by_feat(
mask_preds=tuple(mask_pred),
results_list=[result],
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import FCNMaskHead
class TestFCNMaskHead(TestCase):
@parameterized.expand(['cpu', 'cuda'])
def test_get_seg_masks(self, device):
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
num_classes = 6
mask_head = FCNMaskHead(
num_convs=1,
in_channels=1,
conv_out_channels=1,
num_classes=num_classes)
rcnn_test_cfg = ConfigDict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)
s = 128
img_metas = {
'img_shape': (s, s, 3),
'scale_factor': (1, 1),
'ori_shape': (s, s, 3)
}
result = InstanceData(metainfo=img_metas)
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.get_results(
mask_preds=tuple(mask_pred),
results_list=tuple([result]),
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
        # test with activate_map=True: `mask_pred` has already been activated (sigmoid applied)
num_samples = 2
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
mask_pred = [m.sigmoid().detach() for m in mask_pred]
result.bboxes = torch.rand((num_samples, 4)).to(device)
result.labels = torch.randint(
num_classes, (num_samples, ), dtype=torch.long).to(device)
mask_head.to(device=device)
result_list = mask_head.get_results(
mask_preds=tuple(mask_pred),
results_list=tuple([result]),
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg,
activate_map=True)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
# num_samples is 0
num_samples = 0
result = InstanceData(metainfo=img_metas)
mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
result.bboxes = torch.zeros((num_samples, 4)).to(device)
result.labels = torch.zeros((num_samples, )).to(device)
result_list = mask_head.get_results(
mask_preds=tuple(mask_pred),
results_list=tuple([result]),
batch_img_metas=[img_metas],
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples)
self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import (DeltaXYWHBBoxCoder,
DeltaXYWHBBoxCoderForGLIP)
from .distance_point_bbox_coder import DistancePointBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tblr_bbox_coder import TBLRBBoxCoder
from .yolo_bbox_coder import YOLOBBoxCoder
__all__ = [
'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
'BucketingBBoxCoder', 'DistancePointBBoxCoder', 'DeltaXYWHBBoxCoderForGLIP'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
from .distance_point_bbox_coder import DistancePointBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tblr_bbox_coder import TBLRBBoxCoder
from .yolo_bbox_coder import YOLOBBoxCoder
__all__ = [
'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
'BucketingBBoxCoder', 'DistancePointBBoxCoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator'
]
|
"""
This example loads a pre-trained SentenceTransformer model from Hugging Face ('sentence-transformers/all-mpnet-base-v2' by default).
It then fine-tunes this model for a few epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Sentence Transformer model here, for example all-mpnet-base-v2, all-MiniLM-L6-v2, mixedbread-ai/mxbai-embed-large-v1
model_name = sys.argv[1] if len(sys.argv) > 1 else "sentence-transformers/all-mpnet-base-v2"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful for tracking model performance on STS dev data alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
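The script above reads the model name from its first CLI argument and falls back to sentence-transformers/all-mpnet-base-v2. A hedged sketch of loading the resulting checkpoint for inference (the path is a placeholder for the final_output_dir created by the run):

from sentence_transformers import SentenceTransformer

# Placeholder path: substitute the actual `final_output_dir` produced by the training run.
model = SentenceTransformer("output/training_stsbenchmark_all-mpnet-base-v2-2024-01-01_00-00-00/final")
embeddings = model.encode(["A man is playing guitar.", "Someone plays an instrument."])
print(embeddings.shape)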
"""
This example loads a pre-trained SentenceTransformer model from Hugging Face ('sentence-transformers/all-mpnet-base-v2' by default).
It then fine-tunes this model for a few epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Sentence Transformer model here, for example all-mpnet-base-v2, all-MiniLM-L6-v2, mixedbread-ai/mxbai-embed-large-v1
model_name = sys.argv[1] if len(sys.argv) > 1 else "sentence-transformers/all-mpnet-base-v2"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful for tracking model performance on STS dev data alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
import os
import sys
import torch
from ._internally_replaced_utils import _get_extension_path
_HAS_OPS = False
def _has_ops():
return False
try:
    # On Windows, Python 3.8.x has the `os.add_dll_directory` call,
    # which is used to configure the dll search path.
    # To find cuda related dlls we need to make sure the
    # conda environment/bin path is configured. Please take a look:
    # https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
    # Please note: if some path can't be added using add_dll_directory, we simply ignore this path
if os.name == "nt" and sys.version_info < (3, 9):
env_path = os.environ["PATH"]
path_arr = env_path.split(";")
for path in path_arr:
if os.path.exists(path):
try:
os.add_dll_directory(path) # type: ignore[attr-defined]
except Exception:
pass
lib_path = _get_extension_path("_C")
torch.ops.load_library(lib_path)
_HAS_OPS = True
def _has_ops(): # noqa: F811
return True
except (ImportError, OSError):
pass
def _assert_has_ops():
if not _has_ops():
raise RuntimeError(
"Couldn't load custom C++ ops. This can happen if your PyTorch and "
"torchvision versions are incompatible, or if you had errors while compiling "
"torchvision from source. For further information on the compatible versions, check "
"https://github.com/pytorch/vision#installation for the compatibility matrix. "
"Please check your PyTorch version with torch.__version__ and your torchvision "
"version with torchvision.__version__ and verify if they are compatible, and if not "
"please reinstall torchvision so that it matches your PyTorch install."
)
def _check_cuda_version():
"""
Make sure that CUDA versions match between the pytorch install and torchvision install
"""
if not _HAS_OPS:
return -1
from torch.version import cuda as torch_version_cuda
_version = torch.ops.torchvision._cuda_version()
if _version != -1 and torch_version_cuda is not None:
tv_version = str(_version)
if int(tv_version) < 10000:
tv_major = int(tv_version[0])
tv_minor = int(tv_version[2])
else:
tv_major = int(tv_version[0:2])
tv_minor = int(tv_version[3])
t_version = torch_version_cuda.split(".")
t_major = int(t_version[0])
t_minor = int(t_version[1])
if t_major != tv_major or t_minor != tv_minor:
raise RuntimeError(
"Detected that PyTorch and torchvision were compiled with different CUDA versions. "
f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has "
f"CUDA Version={tv_major}.{tv_minor}. "
"Please reinstall the torchvision that matches your PyTorch install."
)
return _version
def _load_library(lib_name):
lib_path = _get_extension_path(lib_name)
torch.ops.load_library(lib_path)
_check_cuda_version()
|
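A small sanity-check sketch for the helpers defined above (these are private torchvision internals, so calling them directly is an assumption and may break across releases):

import torchvision

# Raises the RuntimeError defined above if the compiled C++ ops could not be loaded.
torchvision.extension._assert_has_ops()
# Returns the CUDA version torchvision was built with, or -1 when ops are unavailable.
print(torchvision.extension._check_cuda_version())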
import ctypes
import os
import sys
from warnings import warn
import torch
from ._internally_replaced_utils import _get_extension_path
_HAS_OPS = False
def _has_ops():
return False
try:
    # On Windows, Python 3.8.x has the `os.add_dll_directory` call,
    # which is used to configure the dll search path.
    # To find cuda related dlls we need to make sure the
    # conda environment/bin path is configured. Please take a look:
    # https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
    # Please note: if some path can't be added using add_dll_directory, we simply ignore this path
if os.name == "nt" and sys.version_info >= (3, 8) and sys.version_info < (3, 9):
env_path = os.environ["PATH"]
path_arr = env_path.split(";")
for path in path_arr:
if os.path.exists(path):
try:
os.add_dll_directory(path) # type: ignore[attr-defined]
except Exception:
pass
lib_path = _get_extension_path("_C")
torch.ops.load_library(lib_path)
_HAS_OPS = True
def _has_ops(): # noqa: F811
return True
except (ImportError, OSError):
pass
def _assert_has_ops():
if not _has_ops():
raise RuntimeError(
"Couldn't load custom C++ ops. This can happen if your PyTorch and "
"torchvision versions are incompatible, or if you had errors while compiling "
"torchvision from source. For further information on the compatible versions, check "
"https://github.com/pytorch/vision#installation for the compatibility matrix. "
"Please check your PyTorch version with torch.__version__ and your torchvision "
"version with torchvision.__version__ and verify if they are compatible, and if not "
"please reinstall torchvision so that it matches your PyTorch install."
)
def _check_cuda_version():
"""
Make sure that CUDA versions match between the pytorch install and torchvision install
"""
if not _HAS_OPS:
return -1
from torch.version import cuda as torch_version_cuda
_version = torch.ops.torchvision._cuda_version()
if _version != -1 and torch_version_cuda is not None:
tv_version = str(_version)
if int(tv_version) < 10000:
tv_major = int(tv_version[0])
tv_minor = int(tv_version[2])
else:
tv_major = int(tv_version[0:2])
tv_minor = int(tv_version[3])
t_version = torch_version_cuda.split(".")
t_major = int(t_version[0])
t_minor = int(t_version[1])
if t_major != tv_major or t_minor != tv_minor:
raise RuntimeError(
"Detected that PyTorch and torchvision were compiled with different CUDA versions. "
f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has "
f"CUDA Version={tv_major}.{tv_minor}. "
"Please reinstall the torchvision that matches your PyTorch install."
)
return _version
def _load_library(lib_name):
lib_path = _get_extension_path(lib_name)
# On Windows Python-3.8+ has `os.add_dll_directory` call,
    # which is called from _get_extension_path to configure the dll search path.
# Condition below adds a workaround for older versions by
# explicitly calling `LoadLibraryExW` with the following flags:
# - LOAD_LIBRARY_SEARCH_DEFAULT_DIRS (0x1000)
# - LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR (0x100)
if os.name == "nt" and sys.version_info < (3, 8):
_kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
if hasattr(_kernel32, "LoadLibraryExW"):
_kernel32.LoadLibraryExW(lib_path, None, 0x00001100)
else:
warn("LoadLibraryExW is missing in kernel32.dll")
torch.ops.load_library(lib_path)
_check_cuda_version()
|
"""Test retriever tool."""
from typing import List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.schema import NodeWithScore, TextNode, QueryBundle
from llama_index.core.tools import RetrieverTool
from llama_index.core.postprocessor.types import BaseNodePostprocessor
import pytest
class MockRetriever(BaseRetriever):
"""Custom retriever for testing."""
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Mock retrieval."""
return [
NodeWithScore(
node=TextNode(
text=f"mock_{query_bundle}",
text_template="Metadata:\n{metadata_str}\n\nContent:\n{content}",
metadata_template="- {key}: {value}",
metadata={"key": "value"},
),
score=0.9,
)
]
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Mock retrieval."""
return [
NodeWithScore(
node=TextNode(
text=f"mock_{query_bundle}",
text_template="Metadata:\n{metadata_str}\n\nContent:\n{content}",
metadata_template="- {key}: {value}",
metadata={"key": "value"},
),
score=0.9,
)
]
class MockPostProcessor(BaseNodePostprocessor):
@classmethod
def class_name(cls) -> str:
return "CitationPostProcessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
for n in nodes:
            prefix = "processed_"
n.node.text = prefix + n.node.text
return nodes
def test_retriever_tool() -> None:
"""Test retriever tool."""
# Test retrieval
retriever = MockRetriever()
retriever_tool = RetrieverTool.from_defaults(retriever=retriever)
response_nodes = retriever_tool("hello world")
assert (
str(response_nodes)
== "Metadata:\n- key: value\n\nContent:\nmock_hello world\n\n"
)
assert response_nodes.raw_output[0].node.text == "mock_hello world\n\n"
# Test node_postprocessors
node_postprocessors = [MockPostProcessor()]
pr_retriever_tool = RetrieverTool.from_defaults(
retriever=retriever, node_postprocessors=node_postprocessors
)
pr_response_nodes = pr_retriever_tool("hello world")
assert (
str(pr_response_nodes)
== "Metadata:\n- key: value\n\nContent:\nprocessed_mock_hello world\n\n"
)
@pytest.mark.asyncio
async def test_retriever_tool_async() -> None:
"""Test retriever tool async call."""
# Test async retrieval
retriever = MockRetriever()
retriever_tool = RetrieverTool.from_defaults(retriever=retriever)
response_nodes = await retriever_tool.acall("hello world")
assert (
str(response_nodes)
== "Metadata:\n- key: value\n\nContent:\nmock_hello world\n\n"
)
assert response_nodes.raw_output[0].node.text == "mock_hello world\n\n"
# Test node_postprocessors async
node_postprocessors = [MockPostProcessor()]
pr_retriever_tool = RetrieverTool.from_defaults(
retriever=retriever, node_postprocessors=node_postprocessors
)
pr_response_nodes = await pr_retriever_tool.acall("hello world")
assert (
str(pr_response_nodes)
== "Metadata:\n- key: value\n\nContent:\nprocessed_mock_hello world\n\n"
)
|
"""Test retriever tool."""
from typing import List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.schema import NodeWithScore, TextNode, QueryBundle
from llama_index.core.tools import RetrieverTool
from llama_index.core.postprocessor.types import BaseNodePostprocessor
import pytest
class MockRetriever(BaseRetriever):
"""Custom retriever for testing."""
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Mock retrieval."""
return [
NodeWithScore(
node=TextNode(
text=f"mock_{query_bundle}",
text_template="Metadata:\n{metadata_str}\n\nContent:\n{content}",
metadata_template="- {key}: {value}",
metadata={"key": "value"},
),
score=0.9,
)
]
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Mock retrieval."""
return [
NodeWithScore(
node=TextNode(
text=f"mock_{query_bundle}",
text_template="Metadata:\n{metadata_str}\n\nContent:\n{content}",
metadata_template="- {key}: {value}",
metadata={"key": "value"},
),
score=0.9,
)
]
class MockPostProcessor(BaseNodePostprocessor):
@classmethod
def class_name(cls) -> str:
return "CitationPostProcessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
for n in nodes:
            prefix = "processed_"
n.node.text = prefix + n.node.text
return nodes
def test_retriever_tool() -> None:
"""Test retriever tool."""
# Test retrieval
retriever = MockRetriever()
retriever_tool = RetrieverTool.from_defaults(retriever=retriever)
response_nodes = retriever_tool("hello world")
assert (
str(response_nodes)
== "Metadata:\n- key: value\n\nContent:\nmock_hello world\n\n"
)
assert response_nodes.raw_output[0].node.text == "mock_hello world\n\n"
# Test node_postprocessors
node_postprocessors = [MockPostProcessor()]
pr_retriever_tool = RetrieverTool.from_defaults(
retriever=retriever, node_postprocessors=node_postprocessors
)
pr_response_nodes = pr_retriever_tool("hello world")
assert (
str(pr_response_nodes)
== "Metadata:\n- key: value\n\nContent:\nprocessed_mock_hello world\n\n"
)
@pytest.mark.asyncio()
async def test_retriever_tool_async() -> None:
"""Test retriever tool async call."""
# Test async retrieval
retriever = MockRetriever()
retriever_tool = RetrieverTool.from_defaults(retriever=retriever)
response_nodes = await retriever_tool.acall("hello world")
assert (
str(response_nodes)
== "Metadata:\n- key: value\n\nContent:\nmock_hello world\n\n"
)
assert response_nodes.raw_output[0].node.text == "mock_hello world\n\n"
# Test node_postprocessors async
node_postprocessors = [MockPostProcessor()]
pr_retriever_tool = RetrieverTool.from_defaults(
retriever=retriever, node_postprocessors=node_postprocessors
)
pr_response_nodes = await pr_retriever_tool.acall("hello world")
assert (
str(pr_response_nodes)
== "Metadata:\n- key: value\n\nContent:\nprocessed_mock_hello world\n\n"
)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
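Mirroring the tests above, a minimal TextUrl usage sketch (uses the same remote URL as the tests, so network access is required):

from pydantic import parse_obj_as
from docarray.typing import TextUrl

url = parse_obj_as(TextUrl, 'https://de.wikipedia.org/wiki/Brixen')
text = url.load(timeout=10)        # page content as str
raw = url.load_bytes(timeout=10)   # raw bytes with the same timeout handling
print(text[:15], len(raw))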
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
import asyncio
import logging
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class AsyncWebPageReader(BaseReader):
"""
Asynchronous web page reader.
Reads pages from the web asynchronously.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
limit (int): Maximum number of concurrent requests.
        dedupe (bool): whether to deduplicate urls if there is an exact match within the given list
        fail_on_error (bool): if a requested url does not return status code 200, the routine will raise a ValueError
"""
def __init__(
self,
html_to_text: bool = False,
limit: int = 10,
dedupe: bool = True,
fail_on_error: bool = False,
) -> None:
"""Initialize with parameters."""
try:
import html2text # noqa: F401
except ImportError:
raise ImportError(
"`html2text` package not found, please run `pip install html2text`"
)
try:
import aiohttp # noqa: F401
except ImportError:
raise ImportError(
"`aiohttp` package not found, please run `pip install aiohttp`"
)
self._limit = limit
self._html_to_text = html_to_text
self._dedupe = dedupe
self._fail_on_error = fail_on_error
async def aload_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the input urls.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
if self._dedupe:
urls = list(dict.fromkeys(urls))
import aiohttp
def chunked_http_client(limit: int):
semaphore = asyncio.Semaphore(limit)
async def http_get(url: str, session: aiohttp.ClientSession):
async with semaphore:
async with session.get(url) as response:
return response, await response.text()
return http_get
async def fetch_urls(urls: List[str]):
http_client = chunked_http_client(self._limit)
async with aiohttp.ClientSession() as session:
tasks = [http_client(url, session) for url in urls]
return await asyncio.gather(*tasks, return_exceptions=True)
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
responses = await fetch_urls(urls)
for i, response_tuple in enumerate(responses):
if not isinstance(response_tuple, tuple):
raise ValueError(f"One of the inputs is not a valid url: {urls[i]}")
response, raw_page = response_tuple
if response.status != 200:
logger.warning(f"error fetching page from {urls[i]}")
logger.info(response)
if self._fail_on_error:
raise ValueError(
f"error fetching page from {urls[i]}. server returned status:"
f" {response.status} and response {raw_page}"
)
continue
if self._html_to_text:
import html2text
response_text = html2text.html2text(raw_page)
else:
response_text = raw_page
documents.append(
Document(text=response_text, extra_info={"Source": str(response.url)})
)
return documents
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the input urls.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
return asyncio.run(self.aload_data(urls))
|
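A minimal usage sketch for the reader above (the import path assumes the llama-index-readers-web package layout, and the URL is a placeholder):

from llama_index.readers.web import AsyncWebPageReader  # import path is an assumption

reader = AsyncWebPageReader(html_to_text=True, limit=5, fail_on_error=False)
documents = reader.load_data(["https://example.com"])
for doc in documents:
    print(doc.metadata.get("Source"), len(doc.text))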
import asyncio
import logging
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class AsyncWebPageReader(BaseReader):
"""Asynchronous web page reader.
Reads pages from the web asynchronously.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
limit (int): Maximum number of concurrent requests.
        dedupe (bool): whether to deduplicate urls if there is an exact match within the given list
        fail_on_error (bool): if a requested url does not return status code 200, the routine will raise a ValueError
"""
def __init__(
self,
html_to_text: bool = False,
limit: int = 10,
dedupe: bool = True,
fail_on_error: bool = False,
) -> None:
"""Initialize with parameters."""
try:
import html2text # noqa: F401
except ImportError:
raise ImportError(
"`html2text` package not found, please run `pip install html2text`"
)
try:
import aiohttp # noqa: F401
except ImportError:
raise ImportError(
"`aiohttp` package not found, please run `pip install aiohttp`"
)
self._limit = limit
self._html_to_text = html_to_text
self._dedupe = dedupe
self._fail_on_error = fail_on_error
async def aload_data(self, urls: List[str]) -> List[Document]:
"""Load data from the input urls.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
if self._dedupe:
urls = list(dict.fromkeys(urls))
import aiohttp
def chunked_http_client(limit: int):
semaphore = asyncio.Semaphore(limit)
async def http_get(url: str, session: aiohttp.ClientSession):
async with semaphore:
async with session.get(url) as response:
return response, await response.text()
return http_get
async def fetch_urls(urls: List[str]):
http_client = chunked_http_client(self._limit)
async with aiohttp.ClientSession() as session:
tasks = [http_client(url, session) for url in urls]
return await asyncio.gather(*tasks, return_exceptions=True)
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
responses = await fetch_urls(urls)
for i, response_tuple in enumerate(responses):
if not isinstance(response_tuple, tuple):
raise ValueError(f"One of the inputs is not a valid url: {urls[i]}")
response, raw_page = response_tuple
if response.status != 200:
logger.warning(f"error fetching page from {urls[i]}")
logger.info(response)
if self._fail_on_error:
raise ValueError(
f"error fetching page from {urls[i]}. server returned status:"
f" {response.status} and response {raw_page}"
)
continue
if self._html_to_text:
import html2text
response_text = html2text.html2text(raw_page)
else:
response_text = raw_page
documents.append(
Document(text=response_text, extra_info={"Source": str(response.url)})
)
return documents
def load_data(self, urls: List[str]) -> List[Document]:
"""Load data from the input urls.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
return asyncio.run(self.aload_data(urls))
|
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
"""[BETA] See :class:`~torchvision.transforms.v2.RandomErase` for details."""
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, tv_tensors.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, tv_tensors.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
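For reference, a short sketch of calling the erase kernel above through the public v2 functional namespace (values are arbitrary; the import path follows torchvision's documented v2 API):

import torch
from torchvision.transforms.v2 import functional as F

img = torch.rand(3, 32, 32)
patch = torch.zeros(3, 8, 8)  # value tensor written into the erased region
out = F.erase(img, i=4, j=4, h=8, w=8, v=patch)
assert out[..., 4:12, 4:12].eq(0).all()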
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
"""[BETA] See :class:`~torchvision.transforms.v2.RandomErase` for details."""
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
import os
from typing import Dict
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.ranking import run_normalization, run_score_normalization
pytestmark = tm.timeout(30)
def comp_training_with_rank_objective(
dtrain: xgboost.DMatrix,
dtest: xgboost.DMatrix,
rank_objective: str,
metric_name: str,
tolerance: float = 1e-02,
) -> None:
"""Internal method that trains the dataset using the rank objective on GPU and CPU,
evaluates the metric and determines if the delta between the metric is within the
tolerance level.
"""
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
params = {
"booster": "gbtree",
"tree_method": "gpu_hist",
"gpu_id": 0,
}
num_trees = 100
check_metric_improvement_rounds = 10
evals_result: Dict[str, Dict] = {}
params["objective"] = rank_objective
params["eval_metric"] = metric_name
bst = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
gpu_scores = evals_result["train"][metric_name][-1]
evals_result = {}
cpu_params = {
"booster": "gbtree",
"tree_method": "hist",
"gpu_id": -1,
}
cpu_params["objective"] = rank_objective
cpu_params["eval_metric"] = metric_name
bstc = xgboost.train(
cpu_params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
cpu_scores = evals_result["train"][metric_name][-1]
info = (rank_objective, metric_name)
assert np.allclose(gpu_scores, cpu_scores, tolerance, tolerance), info
assert np.allclose(bst.best_score, bstc.best_score, tolerance, tolerance), info
evals_result_weighted: Dict[str, Dict] = {}
dtest.set_weight(np.ones((dtest.get_group().size,)))
dtrain.set_weight(np.ones((dtrain.get_group().size,)))
watchlist = [(dtest, "eval"), (dtrain, "train")]
bst_w = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result_weighted,
)
weighted_metric = evals_result_weighted["train"][metric_name][-1]
tolerance = 1e-5
assert np.allclose(bst_w.best_score, bst.best_score, tolerance, tolerance)
assert np.allclose(weighted_metric, gpu_scores, tolerance, tolerance)
@pytest.mark.parametrize(
"objective,metric",
[
("rank:pairwise", "auc"),
("rank:pairwise", "ndcg"),
("rank:pairwise", "map"),
("rank:ndcg", "auc"),
("rank:ndcg", "ndcg"),
("rank:ndcg", "map"),
("rank:map", "auc"),
("rank:map", "ndcg"),
("rank:map", "map"),
],
)
def test_with_mq2008(objective, metric) -> None:
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
) = tm.data.get_mq2008(tm.demo_dir(__file__))
if metric.find("map") != -1 or objective.find("map") != -1:
y_train[y_train <= 1] = 0.0
y_train[y_train > 1] = 1.0
y_test[y_test <= 1] = 0.0
y_test[y_test > 1] = 1.0
dtrain = xgboost.DMatrix(x_train, y_train, qid=qid_train)
dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test)
comp_training_with_rank_objective(dtrain, dtest, objective, metric)
def test_normalization() -> None:
run_normalization("cuda")
@pytest.mark.parametrize("objective", ["rank:pairwise", "rank:ndcg", "rank:map"])
def test_score_normalization(objective: str) -> None:
run_score_normalization("cuda", objective)
|
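A tiny, self-contained sketch of the ranking setup the GPU/CPU comparison above relies on (synthetic data; query ids must be grouped as shown):

import numpy as np
import xgboost

rng = np.random.default_rng(0)
X = rng.random((20, 4))
y = rng.integers(0, 2, size=20).astype(float)
qid = np.repeat([0, 1], 10)  # two query groups of 10 documents each
dtrain = xgboost.DMatrix(X, y, qid=qid)
booster = xgboost.train(
    {"objective": "rank:ndcg", "eval_metric": "ndcg", "tree_method": "hist"},
    dtrain,
    num_boost_round=10,
)
print(booster.predict(dtrain)[:5])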
import os
from typing import Dict
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.ranking import run_normalization
pytestmark = tm.timeout(30)
def comp_training_with_rank_objective(
dtrain: xgboost.DMatrix,
dtest: xgboost.DMatrix,
rank_objective: str,
metric_name: str,
tolerance: float = 1e-02,
) -> None:
"""Internal method that trains the dataset using the rank objective on GPU and CPU,
evaluates the metric and determines if the delta between the metric is within the
tolerance level.
"""
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
params = {
"booster": "gbtree",
"tree_method": "gpu_hist",
"gpu_id": 0,
}
num_trees = 100
check_metric_improvement_rounds = 10
evals_result: Dict[str, Dict] = {}
params["objective"] = rank_objective
params["eval_metric"] = metric_name
bst = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
gpu_scores = evals_result["train"][metric_name][-1]
evals_result = {}
cpu_params = {
"booster": "gbtree",
"tree_method": "hist",
"gpu_id": -1,
}
cpu_params["objective"] = rank_objective
cpu_params["eval_metric"] = metric_name
bstc = xgboost.train(
cpu_params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
cpu_scores = evals_result["train"][metric_name][-1]
info = (rank_objective, metric_name)
assert np.allclose(gpu_scores, cpu_scores, tolerance, tolerance), info
assert np.allclose(bst.best_score, bstc.best_score, tolerance, tolerance), info
evals_result_weighted: Dict[str, Dict] = {}
dtest.set_weight(np.ones((dtest.get_group().size,)))
dtrain.set_weight(np.ones((dtrain.get_group().size,)))
watchlist = [(dtest, "eval"), (dtrain, "train")]
bst_w = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result_weighted,
)
weighted_metric = evals_result_weighted["train"][metric_name][-1]
tolerance = 1e-5
assert np.allclose(bst_w.best_score, bst.best_score, tolerance, tolerance)
assert np.allclose(weighted_metric, gpu_scores, tolerance, tolerance)
@pytest.mark.parametrize(
"objective,metric",
[
("rank:pairwise", "auc"),
("rank:pairwise", "ndcg"),
("rank:pairwise", "map"),
("rank:ndcg", "auc"),
("rank:ndcg", "ndcg"),
("rank:ndcg", "map"),
("rank:map", "auc"),
("rank:map", "ndcg"),
("rank:map", "map"),
],
)
def test_with_mq2008(objective, metric) -> None:
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
) = tm.data.get_mq2008(tm.demo_dir(__file__))
if metric.find("map") != -1 or objective.find("map") != -1:
y_train[y_train <= 1] = 0.0
y_train[y_train > 1] = 1.0
y_test[y_test <= 1] = 0.0
y_test[y_test > 1] = 1.0
dtrain = xgboost.DMatrix(x_train, y_train, qid=qid_train)
dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test)
comp_training_with_rank_objective(dtrain, dtest, objective, metric)
def test_normalization() -> None:
run_normalization("cuda")
|
import sys
import warnings
import torch
_onnx_opset_version_11 = 11
_onnx_opset_version_16 = 16
base_onnx_opset_version = _onnx_opset_version_11
def _register_custom_op():
from torch.onnx.symbolic_helper import parse_args
from torch.onnx.symbolic_opset11 import select, squeeze, unsqueeze
@parse_args("v", "v", "f")
def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
boxes = unsqueeze(g, boxes, 0)
scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
max_output_per_class = g.op("Constant", value_t=torch.tensor([sys.maxsize], dtype=torch.long))
iou_threshold = g.op("Constant", value_t=torch.tensor([iou_threshold], dtype=torch.float))
nms_out = g.op(
"NonMaxSuppression",
g.op("Cast", boxes, to_i=torch.onnx.TensorProtoDataType.FLOAT),
g.op("Cast", scores, to_i=torch.onnx.TensorProtoDataType.FLOAT),
max_output_per_class,
iou_threshold,
)
return squeeze(g, select(g, nms_out, 1, g.op("Constant", value_t=torch.tensor([2], dtype=torch.long))), 1)
def _process_batch_indices_for_roi_align(g, rois):
indices = squeeze(g, select(g, rois, 1, g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))), 1)
return g.op("Cast", indices, to_i=torch.onnx.TensorProtoDataType.INT64)
def _process_rois_for_roi_align(g, rois):
return select(g, rois, 1, g.op("Constant", value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
def _process_sampling_ratio_for_roi_align(g, sampling_ratio: int):
if sampling_ratio < 0:
warnings.warn(
"ONNX export for RoIAlign with a non-zero sampling_ratio is not supported. "
"The model will be exported with a sampling_ratio of 0."
)
sampling_ratio = 0
return sampling_ratio
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset11(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
if aligned:
warnings.warn(
"ROIAlign with aligned=True is only supported in opset >= 16. "
"Please export with opset 16 or higher, or use aligned=False."
)
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset16(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
coordinate_transformation_mode = "half_pixel" if aligned else "output_half_pixel"
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
coordinate_transformation_mode_s=coordinate_transformation_mode,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i")
def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
roi_pool = g.op(
"MaxRoiPool", input, rois, pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale
)
return roi_pool, None
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("torchvision::nms", symbolic_multi_label_nms, _onnx_opset_version_11)
register_custom_op_symbolic("torchvision::roi_align", roi_align_opset11, _onnx_opset_version_11)
register_custom_op_symbolic("torchvision::roi_align", roi_align_opset16, _onnx_opset_version_16)
register_custom_op_symbolic("torchvision::roi_pool", roi_pool, _onnx_opset_version_11)
|
import sys
import warnings
import torch
_onnx_opset_version_11 = 11
_onnx_opset_version_16 = 16
base_onnx_opset_version = _onnx_opset_version_11
def _register_custom_op():
from torch.onnx.symbolic_helper import parse_args
from torch.onnx.symbolic_opset11 import select, squeeze, unsqueeze
from torch.onnx.symbolic_opset9 import _cast_Long
@parse_args("v", "v", "f")
def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
boxes = unsqueeze(g, boxes, 0)
scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
max_output_per_class = g.op("Constant", value_t=torch.tensor([sys.maxsize], dtype=torch.long))
iou_threshold = g.op("Constant", value_t=torch.tensor([iou_threshold], dtype=torch.float))
nms_out = g.op("NonMaxSuppression", boxes, scores, max_output_per_class, iou_threshold)
return squeeze(g, select(g, nms_out, 1, g.op("Constant", value_t=torch.tensor([2], dtype=torch.long))), 1)
def _process_batch_indices_for_roi_align(g, rois):
return _cast_Long(
g, squeeze(g, select(g, rois, 1, g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))), 1), False
)
def _process_rois_for_roi_align(g, rois):
return select(g, rois, 1, g.op("Constant", value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
def _process_sampling_ratio_for_roi_align(g, sampling_ratio: int):
if sampling_ratio < 0:
warnings.warn(
"ONNX export for RoIAlign with a non-zero sampling_ratio is not supported. "
"The model will be exported with a sampling_ratio of 0."
)
sampling_ratio = 0
return sampling_ratio
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset11(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
if aligned:
warnings.warn(
"ROIAlign with aligned=True is only supported in opset >= 16. "
"Please export with opset 16 or higher, or use aligned=False."
)
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset16(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
coordinate_transformation_mode = "half_pixel" if aligned else "output_half_pixel"
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
coordinate_transformation_mode_s=coordinate_transformation_mode,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i")
def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
roi_pool = g.op(
"MaxRoiPool", input, rois, pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale
)
return roi_pool, None
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("torchvision::nms", symbolic_multi_label_nms, _onnx_opset_version_11)
register_custom_op_symbolic("torchvision::roi_align", roi_align_opset11, _onnx_opset_version_11)
register_custom_op_symbolic("torchvision::roi_align", roi_align_opset16, _onnx_opset_version_16)
register_custom_op_symbolic("torchvision::roi_pool", roi_pool, _onnx_opset_version_11)
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.17.1", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.7", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("3.0.1", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
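# Hypothetical invocation sketch (the file name is assumed, not given here):
#   $ python min_dependencies.py numpy
#   1.22.0
# The tag_to_packages mapping built above groups the same pins by extra, e.g.
# tag_to_packages["build"] == ["numpy>=1.22.0", "scipy>=1.8.0", "cython>=3.0.10",
# "meson-python>=0.17.1"], which CI and packaging tooling can consume directly.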
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.7", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("3.0.1", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
"""Utilities for JSON Schema."""
from __future__ import annotations
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING:
from collections.abc import Sequence
def _retrieve_ref(path: str, schema: dict) -> dict:
components = path.split("/")
if components[0] != "#":
msg = (
"ref paths are expected to be URI fragments, meaning they should start "
"with #."
)
raise ValueError(msg)
out = schema
for component in components[1:]:
if component in out:
out = out[component]
elif component.isdigit() and int(component) in out:
out = out[int(component)]
else:
msg = f"Reference '{path}' not found."
raise KeyError(msg)
return deepcopy(out)
def _dereference_refs_helper(
obj: Any,
full_schema: dict[str, Any],
skip_keys: Sequence[str],
processed_refs: Optional[set[str]] = None,
) -> Any:
if processed_refs is None:
processed_refs = set()
if isinstance(obj, dict):
obj_out = {}
for k, v in obj.items():
if k in skip_keys:
obj_out[k] = v
elif k == "$ref":
if v in processed_refs:
continue
processed_refs.add(v)
ref = _retrieve_ref(v, full_schema)
full_ref = _dereference_refs_helper(
ref, full_schema, skip_keys, processed_refs
)
processed_refs.remove(v)
return full_ref
elif isinstance(v, (list, dict)):
obj_out[k] = _dereference_refs_helper(
v, full_schema, skip_keys, processed_refs
)
else:
obj_out[k] = v
return obj_out
elif isinstance(obj, list):
return [
_dereference_refs_helper(el, full_schema, skip_keys, processed_refs)
for el in obj
]
else:
return obj
def _infer_skip_keys(
obj: Any, full_schema: dict, processed_refs: Optional[set[str]] = None
) -> list[str]:
if processed_refs is None:
processed_refs = set()
keys = []
if isinstance(obj, dict):
for k, v in obj.items():
if k == "$ref":
if v in processed_refs:
continue
processed_refs.add(v)
ref = _retrieve_ref(v, full_schema)
keys.append(v.split("/")[1])
keys += _infer_skip_keys(ref, full_schema, processed_refs)
elif isinstance(v, (list, dict)):
keys += _infer_skip_keys(v, full_schema, processed_refs)
elif isinstance(obj, list):
for el in obj:
keys += _infer_skip_keys(el, full_schema, processed_refs)
return keys
def dereference_refs(
schema_obj: dict,
*,
full_schema: Optional[dict] = None,
skip_keys: Optional[Sequence[str]] = None,
) -> dict:
"""Try to substitute $refs in JSON Schema.
Args:
schema_obj: The schema object to dereference.
full_schema: The full schema object. Defaults to None.
skip_keys: The keys to skip. Defaults to None.
Returns:
The dereferenced schema object.
"""
full_schema = full_schema or schema_obj
skip_keys = (
skip_keys
if skip_keys is not None
else _infer_skip_keys(schema_obj, full_schema)
)
return _dereference_refs_helper(schema_obj, full_schema, skip_keys)
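# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module; the schema below is invented
# purely for illustration): dereference_refs should inline the local
# "#/definitions/Pet" reference while leaving the inferred skip key "definitions"
# untouched.
if __name__ == "__main__":
    _example = {
        "type": "object",
        "properties": {"pet": {"$ref": "#/definitions/Pet"}},
        "definitions": {
            "Pet": {"type": "object", "properties": {"name": {"type": "string"}}}
        },
    }
    resolved = dereference_refs(_example)
    # resolved["properties"]["pet"] is now a copy of the full "Pet" sub-schema
    print(resolved["properties"]["pet"])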
|
from __future__ import annotations
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING:
from collections.abc import Sequence
def _retrieve_ref(path: str, schema: dict) -> dict:
components = path.split("/")
if components[0] != "#":
msg = (
"ref paths are expected to be URI fragments, meaning they should start "
"with #."
)
raise ValueError(msg)
out = schema
for component in components[1:]:
if component in out:
out = out[component]
elif component.isdigit() and int(component) in out:
out = out[int(component)]
else:
msg = f"Reference '{path}' not found."
raise KeyError(msg)
return deepcopy(out)
def _dereference_refs_helper(
obj: Any,
full_schema: dict[str, Any],
skip_keys: Sequence[str],
processed_refs: Optional[set[str]] = None,
) -> Any:
if processed_refs is None:
processed_refs = set()
if isinstance(obj, dict):
obj_out = {}
for k, v in obj.items():
if k in skip_keys:
obj_out[k] = v
elif k == "$ref":
if v in processed_refs:
continue
processed_refs.add(v)
ref = _retrieve_ref(v, full_schema)
full_ref = _dereference_refs_helper(
ref, full_schema, skip_keys, processed_refs
)
processed_refs.remove(v)
return full_ref
elif isinstance(v, (list, dict)):
obj_out[k] = _dereference_refs_helper(
v, full_schema, skip_keys, processed_refs
)
else:
obj_out[k] = v
return obj_out
elif isinstance(obj, list):
return [
_dereference_refs_helper(el, full_schema, skip_keys, processed_refs)
for el in obj
]
else:
return obj
def _infer_skip_keys(
obj: Any, full_schema: dict, processed_refs: Optional[set[str]] = None
) -> list[str]:
if processed_refs is None:
processed_refs = set()
keys = []
if isinstance(obj, dict):
for k, v in obj.items():
if k == "$ref":
if v in processed_refs:
continue
processed_refs.add(v)
ref = _retrieve_ref(v, full_schema)
keys.append(v.split("/")[1])
keys += _infer_skip_keys(ref, full_schema, processed_refs)
elif isinstance(v, (list, dict)):
keys += _infer_skip_keys(v, full_schema, processed_refs)
elif isinstance(obj, list):
for el in obj:
keys += _infer_skip_keys(el, full_schema, processed_refs)
return keys
def dereference_refs(
schema_obj: dict,
*,
full_schema: Optional[dict] = None,
skip_keys: Optional[Sequence[str]] = None,
) -> dict:
"""Try to substitute $refs in JSON Schema.
Args:
schema_obj: The schema object to dereference.
full_schema: The full schema object. Defaults to None.
skip_keys: The keys to skip. Defaults to None.
Returns:
The dereferenced schema object.
"""
full_schema = full_schema or schema_obj
skip_keys = (
skip_keys
if skip_keys is not None
else _infer_skip_keys(schema_obj, full_schema)
)
return _dereference_refs_helper(schema_obj, full_schema, skip_keys)
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
try:
import jax.numpy as jnp # type: ignore # noqa: F401
except (ImportError, TypeError):
jnp_imported = False
else:
jnp_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
'redis': '"docarray[redis]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_jax_available():
return jnp_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython`, which is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
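# Hedged usage sketch (not part of the original module): import_library returns the
# module when it can be imported; otherwise it returns None (raise_error=False) or
# raises an ImportError with the matching hint from INSTALL_INSTRUCTIONS, e.g.
#   audio = import_library('pydub', raise_error=False)  # -> module or None
#   if audio is None:
#       import_library('pydub')  # raises, suggesting `pip install "docarray[audio]"`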
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
'redis': '"docarray[redis]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython`, which is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
import types
from typing import TYPE_CHECKING
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
from docarray.index.backends.redis import RedisDocumentIndex # noqa: F401
__all__ = ['InMemoryExactNNIndex']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
elif name == 'RedisDocumentIndex':
import_library('redis', raise_error=True)
import docarray.index.backends.redis as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
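# Hedged usage note (not part of the original module): the module-level __getattr__
# above makes the backend indexes lazy, e.g.
#   from docarray.index import HnswDocumentIndex
# only triggers import_library('hnswlib', ...) on first access, while
# InMemoryExactNNIndex is always importable without any optional dependency.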
|
import types
from typing import TYPE_CHECKING
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
__all__ = ['InMemoryExactNNIndex']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"AQLM (Additive Quantization of Language Model) integration file"
from ..utils import ACCELERATE_MIN_VERSION, is_accelerate_available, is_aqlm_available, is_torch_available
if is_torch_available():
import torch.nn as nn
def replace_with_aqlm_linear(
model,
quantization_config=None,
linear_weights_not_to_quantize=None,
current_key_name=None,
has_been_replaced=False,
):
"""
Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers.
`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the
conversion has been successful or not.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`AqlmConfig`):
The quantization config object that contains the quantization parameters.
linear_weights_not_to_quantize (`list[str]`, *optional*):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
current_key_name (`list`, *optional*):
A list that contains the current key name. This is used for recursion and should not be passed by the user.
has_been_replaced (`bool`, *optional*):
A boolean that indicates if the conversion has been successful or not. This is used for recursion and
should not be passed by the user.
"""
if not is_aqlm_available():
raise ValueError("AQLM is not available. Please install it with `pip install aqlm[cpu,gpu]`")
if not is_accelerate_available():
raise ValueError(
f"AQLM requires Accelerate to be installed: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
)
if linear_weights_not_to_quantize is None:
linear_weights_not_to_quantize = []
from accelerate import init_empty_weights
from aqlm import QuantizedLinear
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if isinstance(module, nn.Linear):
# Check if the current key is not in the `linear_weights_not_to_quantize`
if ".".join(current_key_name) + ".weight" not in linear_weights_not_to_quantize:
with init_empty_weights():
in_features = module.in_features
out_features = module.out_features
model._modules[name] = QuantizedLinear(
in_features,
out_features,
bias=module.bias is not None,
in_group_size=quantization_config.in_group_size,
out_group_size=quantization_config.out_group_size,
num_codebooks=quantization_config.num_codebooks,
nbits_per_codebook=quantization_config.nbits_per_codebook,
)
has_been_replaced = True
# Store the module class in case we need to transpose the weight later
model._modules[name].source_cls = type(module)
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(False)
if len(list(module.children())) > 0:
_, has_been_replaced = replace_with_aqlm_linear(
module,
quantization_config=quantization_config,
linear_weights_not_to_quantize=linear_weights_not_to_quantize,
current_key_name=current_key_name,
has_been_replaced=has_been_replaced,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
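# Hedged usage sketch (placeholders: `model` is an already-loaded torch.nn.Module and
# `aqlm_config` is an AqlmConfig-like object; neither is defined in this file):
#   model, has_been_replaced = replace_with_aqlm_linear(
#       model,
#       quantization_config=aqlm_config,
#       linear_weights_not_to_quantize=["lm_head.weight"],
#   )
#   # has_been_replaced stays False if no nn.Linear module was converted.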
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"AQLM (Additive Quantization of Language Model) integration file"
from ..utils import ACCELERATE_MIN_VERSION, is_accelerate_available, is_aqlm_available, is_torch_available
if is_torch_available():
import torch.nn as nn
def replace_with_aqlm_linear(
model,
quantization_config=None,
linear_weights_not_to_quantize=None,
current_key_name=None,
has_been_replaced=False,
):
"""
Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers.
`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the
    conversion has been successful or not.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`AqlmConfig`):
The quantization config object that contains the quantization parameters.
linear_weights_not_to_quantize (`list[str]`, *optional*):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
current_key_name (`list`, *optional*):
A list that contains the current key name. This is used for recursion and should not be passed by the user.
has_been_replaced (`bool`, *optional*):
A boolean that indicates if the conversion has been successful or not. This is used for recursion and
should not be passed by the user.
"""
if not is_aqlm_available():
raise ValueError("AQLM is not available. Please install it with `pip install aqlm[cpu,gpu]`")
if not is_accelerate_available():
raise ValueError(
f"AQLM requires Accelerate to be installed: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
)
if linear_weights_not_to_quantize is None:
linear_weights_not_to_quantize = []
from accelerate import init_empty_weights
from aqlm import QuantizedLinear
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if isinstance(module, nn.Linear):
# Check if the current key is not in the `linear_weights_not_to_quantize`
if ".".join(current_key_name) + ".weight" not in linear_weights_not_to_quantize:
with init_empty_weights():
in_features = module.in_features
out_features = module.out_features
model._modules[name] = QuantizedLinear(
in_features,
out_features,
bias=module.bias is not None,
in_group_size=quantization_config.in_group_size,
out_group_size=quantization_config.out_group_size,
num_codebooks=quantization_config.num_codebooks,
nbits_per_codebook=quantization_config.nbits_per_codebook,
)
has_been_replaced = True
# Store the module class in case we need to transpose the weight later
model._modules[name].source_cls = type(module)
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(False)
if len(list(module.children())) > 0:
_, has_been_replaced = replace_with_aqlm_linear(
module,
quantization_config=quantization_config,
linear_weights_not_to_quantize=linear_weights_not_to_quantize,
current_key_name=current_key_name,
has_been_replaced=has_been_replaced,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
|
"""DashVector reader."""
from typing import Dict, List, Optional
import json
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class DashVectorReader(BaseReader):
"""
DashVector reader.
Args:
api_key (str): DashVector API key.
endpoint (str): DashVector cluster endpoint.
"""
def __init__(self, api_key: str, endpoint: str):
"""Initialize with parameters."""
try:
import dashvector
except ImportError:
raise ImportError(
"`dashvector` package not found, please run `pip install dashvector`"
)
self._client: dashvector.Client = dashvector.Client(
api_key=api_key, endpoint=endpoint
)
def load_data(
self,
collection_name: str,
vector: Optional[List[float]],
topk: int,
filter: Optional[str] = None,
include_vector: bool = True,
partition: Optional[str] = None,
output_fields: Optional[List[str]] = None,
sparse_vector: Optional[Dict[int, float]] = None,
) -> List[Document]:
"""
Load data from DashVector.
Args:
collection_name (str): Name of the collection.
vector (List[float]): Query vector.
topk (int): Number of results to return.
            filter (Optional[str]): Doc fields filter
                conditions that meet the SQL WHERE clause specification. Details: https://help.aliyun.com/document_detail/2513006.html?spm=a2c4g.2510250.0.0.40d25637QMI4eV
            include_vector (bool): Whether to include the embedding in the response. Defaults to True.
            partition (Optional[str]): The partition name
                to query. Defaults to None.
            output_fields (Optional[List[str]]): The fields
                to return. Defaults to None, meaning all fields.
            sparse_vector (Optional[Dict[int, float]]): The
                sparse vector to query. Defaults to None.
Returns:
List[Document]: A list of documents.
"""
collection = self._client.get(collection_name)
if not collection:
raise ValueError(
f"Failed to get collection: {collection_name}," f"Error: {collection}"
)
ret = collection.query(
vector=vector,
topk=topk,
filter=filter,
include_vector=include_vector,
partition=partition,
output_fields=output_fields,
sparse_vector=sparse_vector,
)
if not ret:
raise Exception(f"Failed to query document," f"Error: {ret}")
doc_metas = ret.output
documents = []
for doc_meta in doc_metas:
node_content = json.loads(doc_meta.fields["_node_content"])
document = Document(
id_=doc_meta.id,
text=node_content["text"],
metadata=node_content["metadata"],
embedding=doc_meta.vector,
)
documents.append(document)
return documents
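# Hedged usage sketch (the API key, endpoint, collection name and vector size below
# are placeholders, not values from this file):
#   reader = DashVectorReader(api_key="<api-key>", endpoint="<cluster-endpoint>")
#   docs = reader.load_data(
#       collection_name="my_collection",
#       vector=[0.1] * 128,  # dimensionality must match the collection
#       topk=5,
#   )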
|
"""DashVector reader."""
from typing import Dict, List, Optional
import json
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class DashVectorReader(BaseReader):
"""DashVector reader.
Args:
api_key (str): DashVector API key.
endpoint (str): DashVector cluster endpoint.
"""
def __init__(self, api_key: str, endpoint: str):
"""Initialize with parameters."""
try:
import dashvector
except ImportError:
raise ImportError(
"`dashvector` package not found, please run `pip install dashvector`"
)
self._client: dashvector.Client = dashvector.Client(
api_key=api_key, endpoint=endpoint
)
def load_data(
self,
collection_name: str,
vector: Optional[List[float]],
topk: int,
filter: Optional[str] = None,
include_vector: bool = True,
partition: Optional[str] = None,
output_fields: Optional[List[str]] = None,
sparse_vector: Optional[Dict[int, float]] = None,
) -> List[Document]:
"""Load data from DashVector.
Args:
collection_name (str): Name of the collection.
vector (List[float]): Query vector.
topk (int): Number of results to return.
            filter (Optional[str]): Doc fields filter
                conditions that meet the SQL WHERE clause specification. Details: https://help.aliyun.com/document_detail/2513006.html?spm=a2c4g.2510250.0.0.40d25637QMI4eV
            include_vector (bool): Whether to include the embedding in the response. Defaults to True.
            partition (Optional[str]): The partition name
                to query. Defaults to None.
            output_fields (Optional[List[str]]): The fields
                to return. Defaults to None, meaning all fields.
            sparse_vector (Optional[Dict[int, float]]): The
                sparse vector to query. Defaults to None.
Returns:
List[Document]: A list of documents.
"""
collection = self._client.get(collection_name)
if not collection:
raise ValueError(
f"Failed to get collection: {collection_name}," f"Error: {collection}"
)
ret = collection.query(
vector=vector,
topk=topk,
filter=filter,
include_vector=include_vector,
partition=partition,
output_fields=output_fields,
sparse_vector=sparse_vector,
)
if not ret:
raise Exception(f"Failed to query document," f"Error: {ret}")
doc_metas = ret.output
documents = []
for doc_meta in doc_metas:
node_content = json.loads(doc_meta.fields["_node_content"])
document = Document(
id_=doc_meta.id,
text=node_content["text"],
metadata=node_content["metadata"],
embedding=doc_meta.vector,
)
documents.append(document)
return documents
|
import os
from typing import Dict
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.ranking import run_normalization, run_score_normalization
pytestmark = tm.timeout(30)
def comp_training_with_rank_objective(
dtrain: xgboost.DMatrix,
dtest: xgboost.DMatrix,
rank_objective: str,
metric_name: str,
tolerance: float = 1e-02,
) -> None:
"""Internal method that trains the dataset using the rank objective on GPU and CPU,
evaluates the metric and determines if the delta between the metric is within the
tolerance level.
"""
    # specify validation sets to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
params = {
"booster": "gbtree",
"tree_method": "hist",
"device": "cuda",
}
num_trees = 100
check_metric_improvement_rounds = 10
evals_result: Dict[str, Dict] = {}
params["objective"] = rank_objective
params["eval_metric"] = metric_name
bst = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
gpu_scores = evals_result["train"][metric_name][-1]
evals_result = {}
cpu_params = {
"booster": "gbtree",
"tree_method": "hist",
"device": "cpu",
}
cpu_params["objective"] = rank_objective
cpu_params["eval_metric"] = metric_name
bstc = xgboost.train(
cpu_params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
cpu_scores = evals_result["train"][metric_name][-1]
info = (rank_objective, metric_name)
assert np.allclose(gpu_scores, cpu_scores, tolerance, tolerance), info
assert np.allclose(bst.best_score, bstc.best_score, tolerance, tolerance), info
evals_result_weighted: Dict[str, Dict] = {}
dtest.set_weight(np.ones((dtest.get_group().size,)))
dtrain.set_weight(np.ones((dtrain.get_group().size,)))
watchlist = [(dtest, "eval"), (dtrain, "train")]
bst_w = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result_weighted,
)
weighted_metric = evals_result_weighted["train"][metric_name][-1]
tolerance = 1e-5
assert np.allclose(bst_w.best_score, bst.best_score, tolerance, tolerance)
assert np.allclose(weighted_metric, gpu_scores, tolerance, tolerance)
@pytest.mark.parametrize(
"objective,metric",
[
("rank:pairwise", "auc"),
("rank:pairwise", "ndcg"),
("rank:pairwise", "map"),
("rank:ndcg", "auc"),
("rank:ndcg", "ndcg"),
("rank:ndcg", "map"),
("rank:map", "auc"),
("rank:map", "ndcg"),
("rank:map", "map"),
],
)
def test_with_mq2008(objective, metric) -> None:
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
) = tm.data.get_mq2008(tm.demo_dir(__file__))
if metric.find("map") != -1 or objective.find("map") != -1:
y_train[y_train <= 1] = 0.0
y_train[y_train > 1] = 1.0
y_test[y_test <= 1] = 0.0
y_test[y_test > 1] = 1.0
dtrain = xgboost.DMatrix(x_train, y_train, qid=qid_train)
dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test)
comp_training_with_rank_objective(dtrain, dtest, objective, metric)
def test_normalization() -> None:
run_normalization("cuda")
@pytest.mark.parametrize("objective", ["rank:pairwise", "rank:ndcg", "rank:map"])
def test_score_normalization(objective: str) -> None:
run_score_normalization("cuda", objective)
|
import os
from typing import Dict
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.ranking import run_normalization, run_score_normalization
pytestmark = tm.timeout(30)
def comp_training_with_rank_objective(
dtrain: xgboost.DMatrix,
dtest: xgboost.DMatrix,
rank_objective: str,
metric_name: str,
tolerance: float = 1e-02,
) -> None:
"""Internal method that trains the dataset using the rank objective on GPU and CPU,
evaluates the metric and determines if the delta between the metric is within the
tolerance level.
"""
    # specify validation sets to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
params = {
"booster": "gbtree",
"tree_method": "gpu_hist",
"gpu_id": 0,
}
num_trees = 100
check_metric_improvement_rounds = 10
evals_result: Dict[str, Dict] = {}
params["objective"] = rank_objective
params["eval_metric"] = metric_name
bst = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
gpu_scores = evals_result["train"][metric_name][-1]
evals_result = {}
cpu_params = {
"booster": "gbtree",
"tree_method": "hist",
"gpu_id": -1,
}
cpu_params["objective"] = rank_objective
cpu_params["eval_metric"] = metric_name
bstc = xgboost.train(
cpu_params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result,
)
cpu_scores = evals_result["train"][metric_name][-1]
info = (rank_objective, metric_name)
assert np.allclose(gpu_scores, cpu_scores, tolerance, tolerance), info
assert np.allclose(bst.best_score, bstc.best_score, tolerance, tolerance), info
evals_result_weighted: Dict[str, Dict] = {}
dtest.set_weight(np.ones((dtest.get_group().size,)))
dtrain.set_weight(np.ones((dtrain.get_group().size,)))
watchlist = [(dtest, "eval"), (dtrain, "train")]
bst_w = xgboost.train(
params,
dtrain,
num_boost_round=num_trees,
early_stopping_rounds=check_metric_improvement_rounds,
evals=watchlist,
evals_result=evals_result_weighted,
)
weighted_metric = evals_result_weighted["train"][metric_name][-1]
tolerance = 1e-5
assert np.allclose(bst_w.best_score, bst.best_score, tolerance, tolerance)
assert np.allclose(weighted_metric, gpu_scores, tolerance, tolerance)
@pytest.mark.parametrize(
"objective,metric",
[
("rank:pairwise", "auc"),
("rank:pairwise", "ndcg"),
("rank:pairwise", "map"),
("rank:ndcg", "auc"),
("rank:ndcg", "ndcg"),
("rank:ndcg", "map"),
("rank:map", "auc"),
("rank:map", "ndcg"),
("rank:map", "map"),
],
)
def test_with_mq2008(objective, metric) -> None:
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
) = tm.data.get_mq2008(tm.demo_dir(__file__))
if metric.find("map") != -1 or objective.find("map") != -1:
y_train[y_train <= 1] = 0.0
y_train[y_train > 1] = 1.0
y_test[y_test <= 1] = 0.0
y_test[y_test > 1] = 1.0
dtrain = xgboost.DMatrix(x_train, y_train, qid=qid_train)
dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test)
comp_training_with_rank_objective(dtrain, dtest, objective, metric)
def test_normalization() -> None:
run_normalization("cuda")
@pytest.mark.parametrize("objective", ["rank:pairwise", "rank:ndcg", "rank:map"])
def test_score_normalization(objective: str) -> None:
run_score_normalization("cuda", objective)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
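# Hedged usage sketch: in MMDetection these losses are normally built from config
# dicts through the LOSSES registry (the field name below is illustrative):
#   loss_bbox = dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)
# They can also be instantiated directly:
#   criterion = L1Loss(reduction='mean', loss_weight=1.0)
#   loss = criterion(pred, target)  # pred and target must have the same shape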
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
assert pred.size() == target.size() and target.numel() > 0
loss = torch.abs(pred - target)
return loss
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms_marco_dev_small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms_marco_dev_small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.61
MRR@10: 54.30
NDCG@10: 65.20
Model Query Sparsity: Active Dimensions: 43.9, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 128.4, Sparsity Ratio: 0.9958
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms_marco_dev_small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6520
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms-marco-dev-small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.46
MRR@10: 54.18
NDCG@10: 65.10
Model Sparsity Stats Query : Row Non-Zero Mean: 43.89658737182617, Row Sparsity Mean: 0.9985617995262146
Model Sparsity Stats Corpus : Row Non-Zero Mean: 128.37216186523438, Row Sparsity Mean: 0.9957940578460693
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms-marco-dev-small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6510
|
"""
This module provides dynamic access to deprecated Zapier tools in LangChain.
It supports backward compatibility by forwarding references such as
`ZapierNLAListActions` and `ZapierNLARunAction` to their updated locations
in the `langchain_community.tools` package.
Code written against the older import paths will continue to function, while LangChain
internally redirects access to the newer, supported module structure.
"""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierNLARunAction": "langchain_community.tools",
"ZapierNLAListActions": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""
Dynamically retrieve attributes from the updated module path.
This method is used to resolve deprecated attribute imports
at runtime and forward them to their new locations.
Args:
name (str): The name of the attribute to import.
Returns:
Any: The resolved attribute from the appropriate updated module.
"""
return _import_attribute(name)
__all__ = [
"ZapierNLAListActions",
"ZapierNLARunAction",
]
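# Hedged usage note (the legacy import path below is hypothetical; the real path
# depends on where this shim module lives in the package tree):
#   from langchain.tools.zapier.tool import ZapierNLARunAction
# Such imports keep working: __getattr__ forwards the name through create_importer,
# which resolves it from langchain_community.tools and emits a deprecation warning.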
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierNLARunAction": "langchain_community.tools",
"ZapierNLAListActions": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZapierNLAListActions",
"ZapierNLARunAction",
]
|
__version__ = '0.30.0a3'
import logging
from docarray.array import DocArray, DocArrayStacked
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocArray', 'DocArrayStacked']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
__version__ = '0.30.0a3'
from docarray.array import DocumentArray, DocumentArrayStacked
from docarray.base_document.document import BaseDocument
import logging
__all__ = ['BaseDocument', 'DocumentArray', 'DocumentArrayStacked']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
# mypy: allow-untyped-defs
from torch.ao.quantization.pt2e.utils import _is_sym_size_node
from torch.ao.quantization.quantizer.quantizer import QuantizationAnnotation
from torch.fx import Node
def _annotate_input_qspec_map(node: Node, input_node: Node, qspec):
quantization_annotation = node.meta.get(
"quantization_annotation", QuantizationAnnotation()
)
if quantization_annotation.input_qspec_map is None:
quantization_annotation.input_qspec_map = {}
quantization_annotation.input_qspec_map[input_node] = qspec
node.meta["quantization_annotation"] = quantization_annotation
def _annotate_output_qspec(node: Node, qspec):
quantization_annotation = node.meta.get(
"quantization_annotation", QuantizationAnnotation()
)
quantization_annotation.output_qspec = qspec
node.meta["quantization_annotation"] = quantization_annotation
def _node_only_used_for_sym_size(node: Node, partition_nodes: list[Node]):
"""
    This utility is used to handle cases where dynamic_shape=True tracing leads
    to symint nodes in the pattern of a linear module. In those cases, we need to
    distinguish between the nodes that are inputs just for extracting the value of
    some dimensions (and symint nodes) vs. the one that is the activation.
    For example:
    graph(x, y, weight):
        size_0 = torch.ops.aten.sym_size([x], [0])
        size_1 = torch.ops.aten.sym_size([y], [1])
        view_size = size_0 * size_1
        size_3 = torch.ops.aten.sym_size([x], [2])
        view_out = torch.ops.aten.view(x, [view_size, size_3])
        return mm(view_out, weight)
    In the example above, the y node is not an actual input. It exists only to extract size_1.
"""
if _is_sym_size_node(node):
return True
return all(
((user not in partition_nodes) or _is_sym_size_node(user))
for user in node.users
)
def _get_module_name_filter(module_name: str):
"""Get the module_name_filter function for a given module name, the filter accepts
a node and checks if the node comes from a module that has certain module name
For example:
node: linear_op = call_function[...](...) # comes from a module with name blocks.sub.linear1
>> module_name_filter = _get_module_name_filter("blocks.sub")
>> print(module_name_filter(node))
True # the node is from "blocks.sub" based on the fully qualified name "blocks.sub.linear1"
"""
def module_name_filter(n: Node) -> bool:
# example: {
# 'L__self___sub': ("L['self'].sub", <class '....Sub'>),
# 'L__self___sub_linear': ("L['self'].sub.linear", <class 'torch.nn.modules.linear.Linear'>)
# }
        # get_attr nodes don't have nn_module_stack?
nn_module_stack = n.meta.get("nn_module_stack", {})
def _normalize_path(n):
prefix = 0
# TODO This is non standard behavior and should be removed when we migrate off capture_pre_autograd_graph.
if n.startswith("L['self']."):
prefix = len("L['self'].")
return n[prefix:]
names = [_normalize_path(n) for n, _ in nn_module_stack.values()]
return module_name in names
return module_name_filter
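# Hedged usage sketch (`graph_module` is a placeholder torch.fx.GraphModule, not
# defined here): a quantizer can use the filter to restrict annotation to one
# submodule, e.g.
#   filter_fn = _get_module_name_filter("blocks.sub")
#   nodes_in_sub = [n for n in graph_module.graph.nodes if filter_fn(n)]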
|
# mypy: allow-untyped-defs
from torch.ao.quantization.pt2e.utils import _is_sym_size_node
from torch.ao.quantization.quantizer.quantizer import QuantizationAnnotation
from torch.fx import Node
def _annotate_input_qspec_map(node: Node, input_node: Node, qspec):
quantization_annotation = node.meta.get(
"quantization_annotation", QuantizationAnnotation()
)
if quantization_annotation.input_qspec_map is None:
quantization_annotation.input_qspec_map = {}
quantization_annotation.input_qspec_map[input_node] = qspec
node.meta["quantization_annotation"] = quantization_annotation
def _annotate_output_qspec(node: Node, qspec):
quantization_annotation = node.meta.get(
"quantization_annotation", QuantizationAnnotation()
)
quantization_annotation.output_qspec = qspec
node.meta["quantization_annotation"] = quantization_annotation
def _node_only_used_for_sym_size(node: Node, partition_nodes: list[Node]):
"""
    This utility is used to handle cases where dynamic_shape=True tracing leads
    to symint nodes in the pattern of a linear module. In those cases, we need to
    distinguish between the nodes that are inputs just for extracting the value of
    some dimensions (and symint nodes) vs. the one that is the activation.
    For example:
    graph(x, y, weight):
        size_0 = torch.ops.aten.sym_size([x], [0])
        size_1 = torch.ops.aten.sym_size([y], [1])
        view_size = size_0 * size_1
        size_3 = torch.ops.aten.sym_size([x], [2])
        view_out = torch.ops.aten.view(x, [view_size, size_3])
        return mm(view_out, weight)
    In the example above, the y node is not an actual input. It exists only to extract size_1.
"""
if _is_sym_size_node(node):
return True
return all(
((user not in partition_nodes) or _is_sym_size_node(user))
for user in node.users
)
def _get_module_name_filter(module_name: str):
"""Get the module_name_filter function for a given module name, the filter accepts
a node and checks if the node comes from a module that has certain module name
For example:
node: linear_op = call_function[...](...) # comes from a module with name blocks.sub.linear1
>> module_name_filter = _get_module_name_filter("blocks.sub")
>> print(module_name_filter(node))
True # the node is from "blocks.sub" based on the fully qualified name "blocks.sub.linear1"
"""
def module_name_filter(n: Node) -> bool:
# example: {
# 'L__self___sub': ("L['self'].sub", <class '....Sub'>),
# 'L__self___sub_linear': ("L['self'].sub.linear", <class 'torch.nn.modules.linear.Linear'>)
# }
        # get_attr nodes don't have nn_module_stack?
nn_module_stack = n.meta.get("nn_module_stack", {})
def _normalize_path(n):
prefix = 0
# TODO This is non standard behavior and should be removed when we migrate off capture_pre_autograd_graph.
if n.startswith("L['self']."):
prefix = len("L['self'].")
return n[prefix:]
names = [_normalize_path(n) for n, _ in nn_module_stack.values()]
return module_name in names
return module_name_filter
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseAnglELoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAnglELoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
            - Needs to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseAnglELoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
        raise AttributeError("SparseAnglELoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
"""XGBoost Experimental Federated Learning related API."""
import ctypes
from threading import Thread
from typing import Any, Dict, Optional
from .core import _LIB, _check_call, make_jcargs
from .tracker import RabitTracker
class FederatedTracker(RabitTracker):
"""Tracker for federated training.
Parameters
----------
n_workers :
The number of federated workers.
port :
The port to listen on.
secure :
Whether this is a secure instance. If True, then the following arguments for SSL
must be provided.
server_key_path :
Path to the server private key file.
server_cert_path :
Path to the server certificate file.
client_cert_path :
Path to the client certificate file.
"""
def __init__( # pylint: disable=R0913, W0231
self,
n_workers: int,
port: int,
secure: bool,
server_key_path: Optional[str] = None,
server_cert_path: Optional[str] = None,
client_cert_path: Optional[str] = None,
timeout: int = 300,
) -> None:
handle = ctypes.c_void_p()
args = make_jcargs(
n_workers=n_workers,
port=port,
dmlc_communicator="federated",
federated_secure=secure,
server_key_path=server_key_path,
server_cert_path=server_cert_path,
client_cert_path=client_cert_path,
timeout=int(timeout),
)
_check_call(_LIB.XGTrackerCreate(args, ctypes.byref(handle)))
self.handle = handle
def run_federated_server( # pylint: disable=too-many-arguments
n_workers: int,
port: int,
server_key_path: Optional[str] = None,
server_cert_path: Optional[str] = None,
client_cert_path: Optional[str] = None,
blocking: bool = True,
timeout: int = 300,
) -> Optional[Dict[str, Any]]:
"""See :py:class:`~xgboost.federated.FederatedTracker` for more info.
Parameters
----------
blocking :
Block the server until the training is finished. If set to False, the function
launches an additional thread and returns the worker arguments. The default is
True and a higher level framework is responsible for setting worker parameters.
"""
args: Dict[str, Any] = {"n_workers": n_workers}
secure = all(
path is not None
for path in [server_key_path, server_cert_path, client_cert_path]
)
tracker = FederatedTracker(
n_workers=n_workers,
port=port,
secure=secure,
timeout=timeout,
server_key_path=server_key_path,
server_cert_path=server_cert_path,
client_cert_path=client_cert_path,
)
tracker.start()
if blocking:
tracker.wait_for()
return None
thread = Thread(target=tracker.wait_for)
thread.daemon = True
thread.start()
args.update(tracker.worker_args())
return args
|
"""XGBoost Experimental Federated Learning related API."""
import ctypes
from threading import Thread
from typing import Any, Dict, Optional
from .core import _LIB, _check_call, make_jcargs
from .tracker import RabitTracker
class FederatedTracker(RabitTracker):
"""Tracker for federated training.
Parameters
----------
n_workers :
The number of federated workers.
port :
The port to listen on.
secure :
Whether this is a secure instance. If True, then the following arguments for SSL
must be provided.
server_key_path :
Path to the server private key file.
server_cert_path :
Path to the server certificate file.
client_cert_path :
Path to the client certificate file.
"""
def __init__( # pylint: disable=R0913, W0231
self,
n_workers: int,
port: int,
secure: bool,
server_key_path: str = "",
server_cert_path: str = "",
client_cert_path: str = "",
timeout: int = 300,
) -> None:
handle = ctypes.c_void_p()
args = make_jcargs(
n_workers=n_workers,
port=port,
dmlc_communicator="federated",
federated_secure=secure,
server_key_path=server_key_path,
server_cert_path=server_cert_path,
client_cert_path=client_cert_path,
timeout=int(timeout),
)
_check_call(_LIB.XGTrackerCreate(args, ctypes.byref(handle)))
self.handle = handle
def run_federated_server( # pylint: disable=too-many-arguments
n_workers: int,
port: int,
server_key_path: Optional[str] = None,
server_cert_path: Optional[str] = None,
client_cert_path: Optional[str] = None,
blocking: bool = True,
timeout: int = 300,
) -> Optional[Dict[str, Any]]:
"""See :py:class:`~xgboost.federated.FederatedTracker` for more info.
Parameters
----------
blocking :
Block the server until the training is finished. If set to False, the function
launches an additional thread and returns the worker arguments. The default is
True and a higher level framework is responsible for setting worker parameters.
"""
args: Dict[str, Any] = {"n_workers": n_workers}
secure = all(
path is not None
for path in [server_key_path, server_cert_path, client_cert_path]
)
tracker = FederatedTracker(
n_workers=n_workers, port=port, secure=secure, timeout=timeout
)
tracker.start()
if blocking:
tracker.wait_for()
return None
thread = Thread(target=tracker.wait_for)
thread.daemon = True
thread.start()
args.update(tracker.worker_args())
return args
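# A brief usage sketch (not part of the original module), assuming the federated
# plugin is available. The worker count and port are arbitrary example values.
if __name__ == "__main__":
    worker_args = run_federated_server(n_workers=2, port=9091, blocking=False)
    # With blocking=False the tracker waits in a daemon thread and the returned
    # dict carries the arguments each federated worker needs to connect.
    print(worker_args)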
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_1.6gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
neck=dict(
type='FPN',
in_channels=[72, 168, 408, 912],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_1.6gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
neck=dict(
type='FPN',
in_channels=[72, 168, 408, 912],
out_channels=256,
num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
from typing import Any, Optional
from llama_index.core.base.agent.types import TaskStepOutput, TaskStep
from llama_index.core.bridge.pydantic import model_validator, field_validator
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.chat_engine.types import (
AGENT_CHAT_RESPONSE_TYPE,
AgentChatResponse,
StreamingAgentChatResponse,
)
from llama_index.core.tools.types import ToolMetadata
class AgentRunStepStartEvent(BaseEvent):
"""
AgentRunStepStartEvent.
Args:
task_id (str): Task ID.
step (Optional[TaskStep]): Task step.
input (Optional[str]): Optional input.
"""
task_id: str
step: Optional[TaskStep]
input: Optional[str]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentRunStepStartEvent"
class AgentRunStepEndEvent(BaseEvent):
"""
AgentRunStepEndEvent.
Args:
step_output (TaskStepOutput): Task step output.
"""
step_output: TaskStepOutput
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentRunStepEndEvent"
class AgentChatWithStepStartEvent(BaseEvent):
"""
AgentChatWithStepStartEvent.
Args:
user_msg (str): User input message.
"""
user_msg: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentChatWithStepStartEvent"
class AgentChatWithStepEndEvent(BaseEvent):
"""
AgentChatWithStepEndEvent.
Args:
response (Optional[AGENT_CHAT_RESPONSE_TYPE]): Agent chat response.
"""
response: Optional[AGENT_CHAT_RESPONSE_TYPE]
@model_validator(mode="before")
@classmethod
def validate_response(cls: Any, values: Any) -> Any:
"""Validate response."""
response = values.get("response")
if response is None:
pass
elif not isinstance(response, AgentChatResponse) and not isinstance(
response, StreamingAgentChatResponse
):
raise ValueError(
"response must be of type AgentChatResponse or StreamingAgentChatResponse"
)
return values
@field_validator("response", mode="before")
@classmethod
def validate_response_type(cls: Any, response: Any) -> Any:
"""Validate response type."""
if response is None:
return response
if not isinstance(response, AgentChatResponse) and not isinstance(
response, StreamingAgentChatResponse
):
raise ValueError(
"response must be of type AgentChatResponse or StreamingAgentChatResponse"
)
return response
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentChatWithStepEndEvent"
class AgentToolCallEvent(BaseEvent):
"""
AgentToolCallEvent.
Args:
arguments (str): Arguments.
tool (ToolMetadata): Tool metadata.
"""
arguments: str
tool: ToolMetadata
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentToolCallEvent"
|
from typing import Any, Optional
from llama_index.core.base.agent.types import TaskStepOutput, TaskStep
from llama_index.core.bridge.pydantic import model_validator, field_validator
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.chat_engine.types import (
AGENT_CHAT_RESPONSE_TYPE,
AgentChatResponse,
StreamingAgentChatResponse,
)
from llama_index.core.tools.types import ToolMetadata
class AgentRunStepStartEvent(BaseEvent):
"""AgentRunStepStartEvent.
Args:
task_id (str): Task ID.
step (Optional[TaskStep]): Task step.
input (Optional[str]): Optional input.
"""
task_id: str
step: Optional[TaskStep]
input: Optional[str]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentRunStepStartEvent"
class AgentRunStepEndEvent(BaseEvent):
"""AgentRunStepEndEvent.
Args:
step_output (TaskStepOutput): Task step output.
"""
step_output: TaskStepOutput
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentRunStepEndEvent"
class AgentChatWithStepStartEvent(BaseEvent):
"""AgentChatWithStepStartEvent.
Args:
user_msg (str): User input message.
"""
user_msg: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentChatWithStepStartEvent"
class AgentChatWithStepEndEvent(BaseEvent):
"""AgentChatWithStepEndEvent.
Args:
response (Optional[AGENT_CHAT_RESPONSE_TYPE]): Agent chat response.
"""
response: Optional[AGENT_CHAT_RESPONSE_TYPE]
@model_validator(mode="before")
@classmethod
def validate_response(cls: Any, values: Any) -> Any:
"""Validate response."""
response = values.get("response")
if response is None:
pass
elif not isinstance(response, AgentChatResponse) and not isinstance(
response, StreamingAgentChatResponse
):
raise ValueError(
"response must be of type AgentChatResponse or StreamingAgentChatResponse"
)
return values
@field_validator("response", mode="before")
@classmethod
def validate_response_type(cls: Any, response: Any) -> Any:
"""Validate response type."""
if response is None:
return response
if not isinstance(response, AgentChatResponse) and not isinstance(
response, StreamingAgentChatResponse
):
raise ValueError(
"response must be of type AgentChatResponse or StreamingAgentChatResponse"
)
return response
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentChatWithStepEndEvent"
class AgentToolCallEvent(BaseEvent):
"""AgentToolCallEvent.
Args:
arguments (str): Arguments.
tool (ToolMetadata): Tool metadata.
"""
arguments: str
tool: ToolMetadata
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentToolCallEvent"
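# A small illustrative sketch (not part of the original module): events are plain
# pydantic models, so they can be constructed directly. The task id and input
# string below are made-up example values.
if __name__ == "__main__":
    event = AgentRunStepStartEvent(task_id="task-123", step=None, input="What is 2 + 2?")
    print(event.class_name())  # AgentRunStepStartEvent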
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .se_layer import SELayer
class InvertedResidual(BaseModule):
"""Inverted Residual Block.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
            Default: 3.
        stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
            se layer.
        with_expand_conv (bool): Use expand conv or not. If set False,
            mid_channels must be the same as in_channels.
Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
            f'But received {stride}.'
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
|
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .se_layer import SELayer
class InvertedResidual(BaseModule):
"""Inverted Residual Block.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
            Default: 3.
        stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
            se layer.
        with_expand_conv (bool): Use expand conv or not. If set False,
            mid_channels must be the same as in_channels.
Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
            f'But received {stride}.'
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
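# A minimal usage sketch (not part of the original file): build one block and run a
# forward pass. The channel sizes and input shape below are arbitrary assumptions.
if __name__ == '__main__':
    import torch

    block = InvertedResidual(
        in_channels=32, out_channels=32, mid_channels=96, kernel_size=3, stride=1)
    x = torch.rand(1, 32, 56, 56)
    # stride == 1 and in_channels == out_channels, so the residual shortcut is used
    # and the output keeps the input shape: torch.Size([1, 32, 56, 56]).
    print(block(x).shape)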
|
import inspect
import logging
import secrets
from typing import Any, Callable, Optional
from fastapi import HTTPException, Request, Security
from fastapi.security import APIKeyHeader, HTTPBearer
from starlette.status import HTTP_401_UNAUTHORIZED
from .config import settings
from .jwt_utils import parse_jwt_token
security = HTTPBearer()
logger = logging.getLogger(__name__)
async def auth_middleware(request: Request):
if not settings.ENABLE_AUTH:
# If authentication is disabled, allow the request to proceed
logger.warning("Auth disabled")
return {}
security = HTTPBearer()
credentials = await security(request)
if not credentials:
raise HTTPException(status_code=401, detail="Authorization header is missing")
try:
payload = parse_jwt_token(credentials.credentials)
request.state.user = payload
logger.debug("Token decoded successfully")
except ValueError as e:
raise HTTPException(status_code=401, detail=str(e))
return payload
class APIKeyValidator:
"""
Configurable API key validator that supports custom validation functions
for FastAPI applications.
This class provides a flexible way to implement API key authentication with optional
custom validation logic. It can be used for simple token matching
or more complex validation scenarios like database lookups.
Examples:
Simple token validation:
```python
validator = APIKeyValidator(
header_name="X-API-Key",
expected_token="your-secret-token"
)
@app.get("/protected", dependencies=[Depends(validator.get_dependency())])
def protected_endpoint():
return {"message": "Access granted"}
```
Custom validation with database lookup:
```python
async def validate_with_db(api_key: str):
api_key_obj = await db.get_api_key(api_key)
return api_key_obj if api_key_obj and api_key_obj.is_active else None
validator = APIKeyValidator(
header_name="X-API-Key",
validate_fn=validate_with_db
)
```
Args:
header_name (str): The name of the header containing the API key
expected_token (Optional[str]): The expected API key value for simple token matching
validate_fn (Optional[Callable]): Custom validation function that takes an API key
string and returns a boolean or object. Can be async.
error_status (int): HTTP status code to use for validation errors
error_message (str): Error message to return when validation fails
"""
def __init__(
self,
header_name: str,
expected_token: Optional[str] = None,
validate_fn: Optional[Callable[[str], bool]] = None,
error_status: int = HTTP_401_UNAUTHORIZED,
error_message: str = "Invalid API key",
):
# Create the APIKeyHeader as a class property
self.security_scheme = APIKeyHeader(name=header_name)
self.expected_token = expected_token
self.custom_validate_fn = validate_fn
self.error_status = error_status
self.error_message = error_message
async def default_validator(self, api_key: str) -> bool:
if not self.expected_token:
            raise ValueError(
                "expected_token must be set when using APIKeyValidator's default validation"
            )
return secrets.compare_digest(api_key, self.expected_token)
async def __call__(
self, request: Request, api_key: str = Security(APIKeyHeader)
) -> Any:
if api_key is None:
raise HTTPException(status_code=self.error_status, detail="Missing API key")
# Use custom validation if provided, otherwise use default equality check
validator = self.custom_validate_fn or self.default_validator
result = (
await validator(api_key)
if inspect.iscoroutinefunction(validator)
else validator(api_key)
)
if not result:
raise HTTPException(
status_code=self.error_status, detail=self.error_message
)
# Store validation result in request state if it's not just a boolean
if result is not True:
request.state.api_key = result
return result
def get_dependency(self):
"""
Returns a callable dependency that FastAPI will recognize as a security scheme
"""
async def validate_api_key(
request: Request, api_key: str = Security(self.security_scheme)
) -> Any:
return await self(request, api_key)
# This helps FastAPI recognize it as a security dependency
validate_api_key.__name__ = f"validate_{self.security_scheme.model.name}"
return validate_api_key
|
import inspect
import logging
from typing import Any, Callable, Optional
from fastapi import HTTPException, Request, Security
from fastapi.security import APIKeyHeader, HTTPBearer
from starlette.status import HTTP_401_UNAUTHORIZED
from .config import settings
from .jwt_utils import parse_jwt_token
security = HTTPBearer()
logger = logging.getLogger(__name__)
async def auth_middleware(request: Request):
if not settings.ENABLE_AUTH:
# If authentication is disabled, allow the request to proceed
logger.warning("Auth disabled")
return {}
security = HTTPBearer()
credentials = await security(request)
if not credentials:
raise HTTPException(status_code=401, detail="Authorization header is missing")
try:
payload = parse_jwt_token(credentials.credentials)
request.state.user = payload
logger.debug("Token decoded successfully")
except ValueError as e:
raise HTTPException(status_code=401, detail=str(e))
return payload
class APIKeyValidator:
"""
Configurable API key validator that supports custom validation functions
for FastAPI applications.
This class provides a flexible way to implement API key authentication with optional
custom validation logic. It can be used for simple token matching
or more complex validation scenarios like database lookups.
Examples:
Simple token validation:
```python
validator = APIKeyValidator(
header_name="X-API-Key",
expected_token="your-secret-token"
)
@app.get("/protected", dependencies=[Depends(validator.get_dependency())])
def protected_endpoint():
return {"message": "Access granted"}
```
Custom validation with database lookup:
```python
async def validate_with_db(api_key: str):
api_key_obj = await db.get_api_key(api_key)
return api_key_obj if api_key_obj and api_key_obj.is_active else None
validator = APIKeyValidator(
header_name="X-API-Key",
validate_fn=validate_with_db
)
```
Args:
header_name (str): The name of the header containing the API key
expected_token (Optional[str]): The expected API key value for simple token matching
validate_fn (Optional[Callable]): Custom validation function that takes an API key
string and returns a boolean or object. Can be async.
error_status (int): HTTP status code to use for validation errors
error_message (str): Error message to return when validation fails
"""
def __init__(
self,
header_name: str,
expected_token: Optional[str] = None,
validate_fn: Optional[Callable[[str], bool]] = None,
error_status: int = HTTP_401_UNAUTHORIZED,
error_message: str = "Invalid API key",
):
# Create the APIKeyHeader as a class property
self.security_scheme = APIKeyHeader(name=header_name)
self.expected_token = expected_token
self.custom_validate_fn = validate_fn
self.error_status = error_status
self.error_message = error_message
async def default_validator(self, api_key: str) -> bool:
return api_key == self.expected_token
async def __call__(
self, request: Request, api_key: str = Security(APIKeyHeader)
) -> Any:
if api_key is None:
raise HTTPException(status_code=self.error_status, detail="Missing API key")
# Use custom validation if provided, otherwise use default equality check
validator = self.custom_validate_fn or self.default_validator
result = (
await validator(api_key)
if inspect.iscoroutinefunction(validator)
else validator(api_key)
)
if not result:
raise HTTPException(
status_code=self.error_status, detail=self.error_message
)
# Store validation result in request state if it's not just a boolean
if result is not True:
request.state.api_key = result
return result
def get_dependency(self):
"""
Returns a callable dependency that FastAPI will recognize as a security scheme
"""
async def validate_api_key(
request: Request, api_key: str = Security(self.security_scheme)
) -> Any:
return await self(request, api_key)
# This helps FastAPI recognize it as a security dependency
validate_api_key.__name__ = f"validate_{self.security_scheme.model.name}"
return validate_api_key
|
from collections.abc import Generator
from langchain_huggingface.llms import HuggingFacePipeline
def test_huggingface_pipeline_streaming() -> None:
"""Test streaming tokens from huggingface_pipeline."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}
)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
stream_results_string = chunk
assert len(stream_results_string.strip()) > 1
|
from typing import Generator
from langchain_huggingface.llms import HuggingFacePipeline
def test_huggingface_pipeline_streaming() -> None:
"""Test streaming tokens from huggingface_pipeline."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}
)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
stream_results_string = chunk
assert len(stream_results_string.strip()) > 1
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor as KerasFileEditor
from keras.src.saving.object_registration import (
CustomObjectScope as CustomObjectScope,
)
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import (
get_custom_objects as get_custom_objects,
)
from keras.src.saving.object_registration import (
get_registered_name as get_registered_name,
)
from keras.src.saving.object_registration import (
get_registered_object as get_registered_object,
)
from keras.src.saving.object_registration import (
register_keras_serializable as register_keras_serializable,
)
from keras.src.saving.saving_api import load_model as load_model
from keras.src.saving.saving_api import load_weights as load_weights
from keras.src.saving.saving_api import save_model as save_model
from keras.src.saving.saving_api import save_weights as save_weights
from keras.src.saving.serialization_lib import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.saving.serialization_lib import (
serialize_keras_object as serialize_keras_object,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.saving_api import load_model
from keras.src.saving.saving_api import load_weights
from keras.src.saving.saving_api import save_model
from keras.src.saving.saving_api import save_weights
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
|
# deprecated, please use datasets.download.download_manager
|
# deprecated, please use datasets.download.download_manager
|
from typing import Any
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain.memory.chat_memory import BaseChatMemory
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class ConversationTokenBufferMemory(BaseChatMemory):
"""Conversation chat memory with token limit.
Keeps only the most recent messages in the conversation under the constraint
that the total number of tokens in the conversation does not exceed a certain limit.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 2000
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is False."""
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_messages(self) -> list[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is True."""
return self.chat_memory.messages
@property
def memory_variables(self) -> list[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer. Pruned."""
super().save_context(inputs, outputs)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
|
from typing import Any, Dict, List
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain.memory.chat_memory import BaseChatMemory
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class ConversationTokenBufferMemory(BaseChatMemory):
"""Conversation chat memory with token limit.
Keeps only the most recent messages in the conversation under the constraint
that the total number of tokens in the conversation does not exceed a certain limit.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 2000
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is False."""
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is True."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer. Pruned."""
super().save_context(inputs, outputs)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
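# A brief usage sketch (not part of the original module). FakeListLLM is used only
# so the example is self-contained; any BaseLanguageModel that can count tokens
# works, and the 60-token limit is an arbitrary choice.
if __name__ == "__main__":
    from langchain_core.language_models import FakeListLLM

    memory = ConversationTokenBufferMemory(
        llm=FakeListLLM(responses=["ok"]), max_token_limit=60
    )
    memory.save_context({"input": "Hi there"}, {"output": "Hello! How can I help?"})
    memory.save_context({"input": "Tell me a joke"}, {"output": "Why did the chicken cross the road?"})
    # Once the buffer exceeds max_token_limit, the oldest messages are pruned first.
    print(memory.load_memory_variables({}))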
|
import os
from typing import Dict
from jina import __default_executor__, __version__
from jina.enums import PodRoleType
from jina.hubble.helper import parse_hub_uri
from jina.hubble.hubio import HubIO
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
except:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'upload_files',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
if uses == __default_executor__ or uses.startswith('docker://'):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
except ValueError:
return False
|
import os
from jina import __default_executor__, __version__
from jina.enums import PodRoleType
from jina.hubble.helper import parse_hub_uri
from jina.hubble.hubio import HubIO
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v1/repositories/jinaai/jina/tags'
tags = requests.get(url).json()
name_set = {tag['name'] for tag in tags}
if __version__ in name_set:
return __version__
else:
return 'master'
except:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'upload_files',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
if uses == __default_executor__ or uses.startswith('docker://'):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
except ValueError:
return False
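# A tiny illustrative sketch (not part of the original module): slashes and
# underscores are invalid in K8s resource names, so they become dashes and the
# result is lower-cased. The example name is made up.
if __name__ == '__main__':
    print(to_compatible_name('My_Executor/shard_0'))  # my-executor-shard-0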
|
from typing import List, cast
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.core.schema import (
Document,
NodeRelationship,
QueryBundle,
RelatedNodeInfo,
TextNode,
)
from llama_index.core.vector_stores.simple import SimpleVectorStore
def test_simple_query(
documents: List[Document],
patch_llm_predictor,
patch_token_text_splitter,
mock_embed_model,
) -> None:
"""Test embedding query."""
index = VectorStoreIndex.from_documents(documents, embed_model=mock_embed_model)
# test embedding query
query_str = "What is?"
retriever = index.as_retriever(similarity_top_k=1)
nodes = retriever.retrieve(QueryBundle(query_str))
assert len(nodes) == 1
assert nodes[0].node.get_content() == "This is another test."
def test_query_and_similarity_scores(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test that sources nodes have similarity scores."""
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
document = Document(text=doc_text)
index = VectorStoreIndex.from_documents([document])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
nodes = retriever.retrieve(QueryBundle(query_str))
assert len(nodes) > 0
assert nodes[0].score is not None
def test_simple_check_ids(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test build VectorStoreIndex."""
ref_doc_id = "ref_doc_id_test"
source_rel = {NodeRelationship.SOURCE: RelatedNodeInfo(node_id=ref_doc_id)}
all_nodes = [
TextNode(text="Hello world.", id_="node1", relationships=source_rel),
TextNode(text="This is a test.", id_="node2", relationships=source_rel),
TextNode(text="This is another test.", id_="node3", relationships=source_rel),
TextNode(text="This is a test v2.", id_="node4", relationships=source_rel),
]
index = VectorStoreIndex(all_nodes)
# test query
query_str = "What is?"
retriever = index.as_retriever()
nodes = retriever.retrieve(QueryBundle(query_str))
assert nodes[0].node.get_content() == "This is another test."
assert nodes[0].node.ref_doc_id == "ref_doc_id_test"
assert nodes[0].node.node_id == "node3"
vector_store = cast(SimpleVectorStore, index._vector_store)
assert "node3" in vector_store._data.embedding_dict
assert "node3" in vector_store._data.text_id_to_ref_doc_id
def test_query(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test embedding query."""
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
document = Document(text=doc_text)
index = VectorStoreIndex.from_documents([document])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
_ = retriever.retrieve(QueryBundle(query_str))
|
from typing import List, cast
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.core.schema import (
Document,
NodeRelationship,
QueryBundle,
RelatedNodeInfo,
TextNode,
)
from llama_index.core.vector_stores.simple import SimpleVectorStore
def test_simple_query(
documents: List[Document],
patch_llm_predictor,
patch_token_text_splitter,
mock_embed_model,
) -> None:
"""Test embedding query."""
index = VectorStoreIndex.from_documents(documents, embed_model=mock_embed_model)
# test embedding query
query_str = "What is?"
retriever = index.as_retriever(similarity_top_k=1)
nodes = retriever.retrieve(QueryBundle(query_str))
assert len(nodes) == 1
assert nodes[0].node.get_content() == "This is another test."
def test_query_and_similarity_scores(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test that sources nodes have similarity scores."""
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
document = Document(text=doc_text)
index = VectorStoreIndex.from_documents([document])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
nodes = retriever.retrieve(QueryBundle(query_str))
assert len(nodes) > 0
assert nodes[0].score is not None
def test_simple_check_ids(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test build VectorStoreIndex."""
ref_doc_id = "ref_doc_id_test"
source_rel = {NodeRelationship.SOURCE: RelatedNodeInfo(node_id=ref_doc_id)}
all_nodes = [
TextNode(text="Hello world.", id_="node1", relationships=source_rel),
TextNode(text="This is a test.", id_="node2", relationships=source_rel),
TextNode(text="This is another test.", id_="node3", relationships=source_rel),
TextNode(text="This is a test v2.", id_="node4", relationships=source_rel),
]
index = VectorStoreIndex(all_nodes)
# test query
query_str = "What is?"
retriever = index.as_retriever()
nodes = retriever.retrieve(QueryBundle(query_str))
assert nodes[0].node.get_content() == "This is another test."
assert nodes[0].node.ref_doc_id == "ref_doc_id_test"
assert nodes[0].node.node_id == "node3"
vector_store = cast(SimpleVectorStore, index._vector_store)
assert "node3" in vector_store._data.embedding_dict
assert "node3" in vector_store._data.text_id_to_ref_doc_id
def test_query(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test embedding query."""
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
document = Document(text=doc_text)
index = VectorStoreIndex.from_documents([document])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
_ = retriever.retrieve(QueryBundle(query_str))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.typing import NdArray
class Nested(BaseDoc):
tensor: NdArray
class Image(BaseDoc):
features: Optional[Nested] = None
def test_optional_field():
docs = DocVec[Image]([Image() for _ in range(10)])
assert docs.features is None
docs.features = DocList[Nested]([Nested(tensor=np.zeros(10)) for _ in range(10)])
assert docs.features.tensor.shape == (10, 10)
for doc in docs:
assert doc.features.tensor.shape == (10,)
def test_set_none():
docs = DocVec[Image](
[Image(features=Nested(tensor=np.zeros(10))) for _ in range(10)]
)
assert docs.features.tensor.shape == (10, 10)
docs.features = None
assert docs.features is None
for doc in docs:
assert doc.features is None
def test_set_doc():
docs = DocVec[Image](
[Image(features=Nested(tensor=np.zeros(10))) for _ in range(10)]
)
assert docs.features.tensor.shape == (10, 10)
for doc in docs:
doc.features = Nested(tensor=np.ones(10))
with pytest.raises(ValueError):
doc.features = None
def test_set_doc_none():
docs = DocVec[Image]([Image() for _ in range(10)])
assert docs.features is None
for doc in docs:
with pytest.raises(ValueError):
doc.features = Nested(tensor=np.ones(10))
def test_no_uniform_none():
with pytest.raises(ValueError):
DocVec[Image]([Image(), Image(features=Nested(tensor=np.zeros(10)))])
with pytest.raises(ValueError):
DocVec[Image]([Image(features=Nested(tensor=np.zeros(10))), Image()])
|
from typing import Optional
import numpy as np
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.typing import NdArray
class Nested(BaseDoc):
tensor: NdArray
class Image(BaseDoc):
features: Optional[Nested] = None
def test_optional_field():
docs = DocVec[Image]([Image() for _ in range(10)])
assert docs.features is None
docs.features = DocList[Nested]([Nested(tensor=np.zeros(10)) for _ in range(10)])
assert docs.features.tensor.shape == (10, 10)
for doc in docs:
assert doc.features.tensor.shape == (10,)
def test_set_none():
docs = DocVec[Image](
[Image(features=Nested(tensor=np.zeros(10))) for _ in range(10)]
)
assert docs.features.tensor.shape == (10, 10)
docs.features = None
assert docs.features is None
for doc in docs:
assert doc.features is None
def test_set_doc():
docs = DocVec[Image](
[Image(features=Nested(tensor=np.zeros(10))) for _ in range(10)]
)
assert docs.features.tensor.shape == (10, 10)
for doc in docs:
doc.features = Nested(tensor=np.ones(10))
with pytest.raises(ValueError):
doc.features = None
def test_set_doc_none():
docs = DocVec[Image]([Image() for _ in range(10)])
assert docs.features is None
for doc in docs:
with pytest.raises(ValueError):
doc.features = Nested(tensor=np.ones(10))
def test_no_uniform_none():
with pytest.raises(ValueError):
DocVec[Image]([Image(), Image(features=Nested(tensor=np.zeros(10)))])
with pytest.raises(ValueError):
DocVec[Image]([Image(features=Nested(tensor=np.zeros(10))), Image()])
|