input | output
---|---|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result, bbox_mapping_back
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def merge_aug_results(self, aug_results, img_metas):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
Returns:
tuple: (bboxes, labels)
"""
recovered_bboxes, aug_labels = [], []
for bboxes_labels, img_info in zip(aug_results, img_metas):
img_shape = img_info[0]['img_shape'] # using shape before padding
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes, labels = bboxes_labels
bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
aug_labels.append(labels)
bboxes = torch.cat(recovered_bboxes, dim=0)
labels = torch.cat(aug_labels)
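# run the head's NMS over the detections merged from all augmentations (skipped when nothing was detected)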
if bboxes.shape[0] > 0:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=False):
"""Augment testing of CornerNet.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
Note:
``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
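# imgs arrive as [orig_0, flip_0, orig_1, flip_1, ...]; each original/flipped pair is concatenated and run through the network in a single forward pass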
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, [img_metas[ind], img_metas[flip_ind]], False, False)
aug_results.append(bbox_list[0])
aug_results.append(bbox_list[1])
bboxes, labels = self.merge_aug_results(aug_results, img_metas)
bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
return [bbox_results]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.api import wrappers
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.remat import RematScope
from keras.src.backend.common.remat import remat
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.api import wrappers
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
import multiprocessing
import time
import pytest
from docarray import Document, DocumentArray
from docarray.array.mixins.post import _parse_host
from docarray.helper import random_port
@pytest.mark.parametrize(
'host, expected_on, expected_host, expected_port, expected_version, expected_scheme',
[
(
'grpc://192.168.0.123:8080/index',
'/index',
'grpc://192.168.0.123',
8080,
None,
'grpc',
),
(
'ws://192.168.0.123:80/encode',
'/encode',
'ws://192.168.0.123',
80,
None,
'ws',
),
(
'http://192.168.192.123:8080/index',
'/index',
'http://192.168.192.123',
8080,
None,
'http',
),
(
'jinahub://Hello/endpoint',
'/endpoint',
'jinahub://Hello',
None,
None,
'jinahub',
),
(
'jinahub+docker://Hello/index',
'/index',
'jinahub+docker://Hello',
None,
None,
'jinahub+docker',
),
(
'jinahub+docker://Hello/v0.0.1/search',
'/search',
'jinahub+docker://Hello/v0.0.1',
None,
'v0.0.1',
'jinahub+docker',
),
(
'jinahub+docker://Hello/latest/index',
'/index',
'jinahub+docker://Hello/latest',
None,
'latest',
'jinahub+docker',
),
(
'jinahub+docker://Hello/v0.0.1-cpu/index',
'/index',
'jinahub+docker://Hello/v0.0.1-cpu',
None,
'v0.0.1-cpu',
'jinahub+docker',
),
(
'jinahub+docker://Hello/v0.5-gpu/index',
'/index',
'jinahub+docker://Hello/v0.5-gpu',
None,
'v0.5-gpu',
'jinahub+docker',
),
(
'jinahub+sandbox://Hello/index',
'/index',
'jinahub+sandbox://Hello',
None,
None,
'jinahub+sandbox',
),
],
)
def test_parse_host(
host, expected_on, expected_host, expected_port, expected_version, expected_scheme
):
parsed_host = _parse_host(host)
assert parsed_host.on == expected_on
assert parsed_host.host == expected_host
assert parsed_host.port == expected_port
assert parsed_host.version == expected_version
assert parsed_host.scheme == expected_scheme
@pytest.mark.parametrize(
'conn_config',
[
(dict(protocol='grpc'), 'grpc://127.0.0.1:$port/'),
(dict(protocol='grpc'), 'grpc://127.0.0.1:$port'),
(dict(protocol='websocket'), 'ws://127.0.0.1:$port'),
# (dict(protocol='http'), 'http://127.0.0.1:$port'), this somehow does not work on GH workflow
],
)
@pytest.mark.parametrize('show_pbar', [True, False])
@pytest.mark.parametrize('batch_size', [None, 1, 10])
def test_post_to_a_flow(show_pbar, conn_config, batch_size):
from jina import Flow
p = random_port()
da = DocumentArray.empty(100)
with Flow(**{**conn_config[0], 'port': p}):
da.post(conn_config[1].replace('$port', str(p)), batch_size=batch_size)
@pytest.mark.parametrize(
'hub_uri',
[
'jinahub://Hello',
'jinahub+sandbox://Hello',
# 'jinahub+docker://Hello', this somehow does not work on GH workflow
],
)
def test_post_with_jinahub(hub_uri):
da = DocumentArray.empty(100)
da.post(hub_uri)
assert isinstance(Document().post(hub_uri), Document)
def test_post_bad_scheme():
da = DocumentArray.empty(100)
with pytest.raises(ValueError):
da.post('haha')
def test_endpoint():
from jina import Executor, Flow, requests
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
docs.texts = ['foo'] * len(docs)
@requests(on='/bar')
def bar(self, docs: DocumentArray, **kwargs):
docs.texts = ['bar'] * len(docs)
def start_flow(stop_event, **kwargs):
"""start a blocking Flow."""
with Flow(**kwargs).add(uses=MyExec) as f:
f.block(stop_event=stop_event)
e = multiprocessing.Event() # create new Event
p = random_port()
t = multiprocessing.Process(
name='Blocked-Flow', target=start_flow, args=(e,), kwargs={'port': p}
)
t.start()
time.sleep(5)
N = 100
da = DocumentArray.empty(N)
try:
assert da.post(f'grpc://127.0.0.1:{p}/')[:, 'text'] == [''] * N
assert da.post(f'grpc://127.0.0.1:{p}/foo').texts == ['foo'] * N
assert da.post(f'grpc://127.0.0.1:{p}/bar').texts == ['bar'] * N
except:
raise
finally:
e.set()
t.join()
|
import multiprocessing
import time
import pytest
from docarray import DocumentArray, Document
from docarray.helper import random_port
@pytest.mark.parametrize(
'conn_config',
[
(dict(protocol='grpc'), 'grpc://127.0.0.1:$port/'),
(dict(protocol='grpc'), 'grpc://127.0.0.1:$port'),
(dict(protocol='websocket'), 'ws://127.0.0.1:$port'),
# (dict(protocol='http'), 'http://127.0.0.1:$port'), this somehow does not work on GH workflow
],
)
@pytest.mark.parametrize('show_pbar', [True, False])
@pytest.mark.parametrize('batch_size', [None, 1, 10])
def test_post_to_a_flow(show_pbar, conn_config, batch_size):
from jina import Flow
p = random_port()
da = DocumentArray.empty(100)
with Flow(**{**conn_config[0], 'port': p}):
da.post(conn_config[1].replace('$port', str(p)), batch_size=batch_size)
@pytest.mark.parametrize(
'hub_uri',
[
'jinahub://Hello',
'jinahub+sandbox://Hello',
# 'jinahub+docker://Hello', this somehow does not work on GH workflow
],
)
def test_post_with_jinahub(hub_uri):
da = DocumentArray.empty(100)
da.post(hub_uri)
assert isinstance(Document().post(hub_uri), Document)
def test_post_bad_scheme():
da = DocumentArray.empty(100)
with pytest.raises(ValueError):
da.post('haha')
def test_endpoint():
from jina import Executor, requests, Flow
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
docs.texts = ['foo'] * len(docs)
@requests(on='/bar')
def bar(self, docs: DocumentArray, **kwargs):
docs.texts = ['bar'] * len(docs)
def start_flow(stop_event, **kwargs):
"""start a blocking Flow."""
with Flow(**kwargs).add(uses=MyExec) as f:
f.block(stop_event=stop_event)
e = multiprocessing.Event() # create new Event
p = random_port()
t = multiprocessing.Process(
name='Blocked-Flow', target=start_flow, args=(e,), kwargs={'port': p}
)
t.start()
time.sleep(5)
N = 100
da = DocumentArray.empty(N)
try:
assert da.post(f'grpc://127.0.0.1:{p}/')[:, 'text'] == [''] * N
assert da.post(f'grpc://127.0.0.1:{p}/foo').texts == ['foo'] * N
assert da.post(f'grpc://127.0.0.1:{p}/bar').texts == ['bar'] * N
except:
raise
finally:
e.set()
t.join()
|
import inspect
import re
from typing import Dict, List
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache # noqa F401
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import inspect
import re
from typing import Dict, List
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from ...core.utils import stack_batch
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
init_cfg (dict): the initialization config.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_classes,
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0)):
super(BaseSemanticHead, self).__init__(init_cfg)
self.loss_seg = build_loss(loss_seg)
self.num_classes = num_classes
@force_fp32(apply_to=('seg_preds', ))
def loss(self, seg_preds, gt_semantic_seg):
"""Get the loss of semantic head.
Args:
seg_preds (Tensor): The input logits with the shape (N, C, H, W).
gt_semantic_seg (Tensor): The ground truth of semantic segmentation with
the shape (N, H, W).
Returns:
dict: the loss of semantic head.
"""
if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_seg.reshape(-1).long())
return dict(loss_seg=loss_seg)
@abstractmethod
def forward(self, x):
"""Placeholder of forward function.
Returns:
dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
def forward_train(self, x, data_samples):
output = self.forward(x)
seg_preds = output['seg_preds']
gt_semantic_segs = [
data_sample.gt_sem_seg for data_sample in data_samples
]
gt_semantic_segs = stack_batch(gt_semantic_segs, pad_value=255)
return self.loss(seg_preds, gt_semantic_segs)
def simple_test(self, x, img_metas, rescale=False):
output = self.forward(x)
seg_preds = output['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=img_metas[0]['pad_shape'][:2],
mode='bilinear',
align_corners=False)
if rescale:
h, w, _ = img_metas[0]['img_shape']
seg_preds = seg_preds[:, :, :h, :w]
h, w, _ = img_metas[0]['ori_shape']
seg_preds = F.interpolate(
seg_preds, size=(h, w), mode='bilinear', align_corners=False)
return seg_preds
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
init_cfg (dict): the initialization config.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_classes,
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0)):
super(BaseSemanticHead, self).__init__(init_cfg)
self.loss_seg = build_loss(loss_seg)
self.num_classes = num_classes
@force_fp32(apply_to=('seg_preds', ))
def loss(self, seg_preds, gt_semantic_seg):
"""Get the loss of semantic head.
Args:
seg_preds (Tensor): The input logits with the shape (N, C, H, W).
gt_semantic_seg (Tensor): The ground truth of semantic segmentation with
the shape (N, H, W).
Returns:
dict: the loss of semantic head.
"""
if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_seg.reshape(-1).long())
return dict(loss_seg=loss_seg)
@abstractmethod
def forward(self, x):
"""Placeholder of forward function.
Returns:
dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
def forward_train(self, x, gt_semantic_seg):
output = self.forward(x)
seg_preds = output['seg_preds']
return self.loss(seg_preds, gt_semantic_seg)
def simple_test(self, x, img_metas, rescale=False):
output = self.forward(x)
seg_preds = output['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=img_metas[0]['pad_shape'][:2],
mode='bilinear',
align_corners=False)
if rescale:
h, w, _ = img_metas[0]['img_shape']
seg_preds = seg_preds[:, :, :h, :w]
h, w, _ = img_metas[0]['ori_shape']
seg_preds = F.interpolate(
seg_preds, size=(h, w), mode='bilinear', align_corners=False)
return seg_preds
|
import urllib.parse
from typing import ClassVar, Optional
from backend.data.model import OAuth2Credentials, ProviderName
from backend.integrations.oauth.base import BaseOAuthHandler
from backend.util.request import Requests
class TodoistOAuthHandler(BaseOAuthHandler):
PROVIDER_NAME = ProviderName.TODOIST
DEFAULT_SCOPES: ClassVar[list[str]] = [
"task:add",
"data:read",
"data:read_write",
"data:delete",
"project:delete",
]
AUTHORIZE_URL = "https://todoist.com/oauth/authorize"
TOKEN_URL = "https://todoist.com/oauth/access_token"
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
params = {
"client_id": self.client_id,
"scope": ",".join(self.DEFAULT_SCOPES),
"state": state,
}
return f"{self.AUTHORIZE_URL}?{urllib.parse.urlencode(params)}"
async def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
"""Exchange authorization code for access tokens"""
data = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"code": code,
"redirect_uri": self.redirect_uri,
}
response = await Requests().post(self.TOKEN_URL, data=data)
tokens = response.json()
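# query the Sync API for the user's profile so the email can be stored as the credential username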
response = await Requests().post(
"https://api.todoist.com/sync/v9/sync",
headers={"Authorization": f"Bearer {tokens['access_token']}"},
data={"sync_token": "*", "resource_types": '["user"]'},
)
user_info = response.json()
user_email = user_info["user"].get("email")
return OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=None,
username=user_email,
access_token=tokens["access_token"],
refresh_token=None,
access_token_expires_at=None,
refresh_token_expires_at=None,
scopes=scopes,
)
async def _refresh_tokens(
self, credentials: OAuth2Credentials
) -> OAuth2Credentials:
# Todoist does not support token refresh
return credentials
async def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
return False
|
import urllib.parse
from typing import ClassVar, Optional
from backend.data.model import OAuth2Credentials, ProviderName
from backend.integrations.oauth.base import BaseOAuthHandler
from backend.util.request import Requests
class TodoistOAuthHandler(BaseOAuthHandler):
PROVIDER_NAME = ProviderName.TODOIST
DEFAULT_SCOPES: ClassVar[list[str]] = [
"task:add",
"data:read",
"data:read_write",
"data:delete",
"project:delete",
]
AUTHORIZE_URL = "https://todoist.com/oauth/authorize"
TOKEN_URL = "https://todoist.com/oauth/access_token"
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
params = {
"client_id": self.client_id,
"scope": ",".join(self.DEFAULT_SCOPES),
"state": state,
}
return f"{self.AUTHORIZE_URL}?{urllib.parse.urlencode(params)}"
def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
"""Exchange authorization code for access tokens"""
data = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"code": code,
"redirect_uri": self.redirect_uri,
}
response = Requests().post(self.TOKEN_URL, data=data)
response.raise_for_status()
tokens = response.json()
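# query the Sync API for the user's profile so the email can be stored as the credential username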
response = Requests().post(
"https://api.todoist.com/sync/v9/sync",
headers={"Authorization": f"Bearer {tokens['access_token']}"},
data={"sync_token": "*", "resource_types": '["user"]'},
)
response.raise_for_status()
user_info = response.json()
user_email = user_info["user"].get("email")
return OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=None,
username=user_email,
access_token=tokens["access_token"],
refresh_token=None,
access_token_expires_at=None,
refresh_token_expires_at=None,
scopes=scopes,
)
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# Todoist does not support token refresh
return credentials
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
return False
|
class MonitoringMixin:
"""The Monitoring Mixin for pods"""
def _setup_monitoring(self):
"""
Set up the metrics registry and start the Prometheus monitoring server if monitoring is enabled
"""
if self.args.monitoring:
from prometheus_client import CollectorRegistry
self.metrics_registry = CollectorRegistry()
else:
self.metrics_registry = None
if self.args.monitoring:
from prometheus_client import start_http_server
start_http_server(
int(self.args.port_monitoring), registry=self.metrics_registry
)
|
class MonitoringMixin:
"""The Monitoring Mixin for pods"""
def _setup_monitoring(self):
"""
Set up the metrics registry and start the Prometheus monitoring server if monitoring is enabled
"""
if self.args.monitoring:
from prometheus_client import CollectorRegistry
self.metrics_registry = CollectorRegistry()
else:
self.metrics_registry = None
if self.args.monitoring:
from prometheus_client import start_http_server
start_http_server(self.args.port_monitoring, registry=self.metrics_registry)
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute()
dll_path = [curr_path,
curr_path.parents[1],
curr_path.parents[0] / 'bin',
curr_path.parents[0] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute().parent
dll_path = [curr_path,
curr_path.parents[1],
curr_path / 'compile',
curr_path.parent / 'compile',
curr_path.parents[1] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parent / 'compile' / 'Release')
dll_path.append(curr_path.parent / 'compile' / 'windows' / 'x64' / 'DLL')
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
|
"""Test functionality related to length based selector."""
import pytest
from langchain_core.example_selectors import (
LengthBasedExampleSelector,
)
from langchain_core.prompts import PromptTemplate
EXAMPLES = [
{"question": "Question: who are you?\nAnswer: foo"},
{"question": "Question: who are you?\nAnswer: foo"},
]
@pytest.fixture
def selector() -> LengthBasedExampleSelector:
"""Get length based selector to use in tests."""
prompts = PromptTemplate(input_variables=["question"], template="{question}")
return LengthBasedExampleSelector(
examples=EXAMPLES,
example_prompt=prompts,
max_length=30,
)
def test_selector_valid(selector: LengthBasedExampleSelector) -> None:
"""Test LengthBasedExampleSelector can select examples.."""
short_question = "Short question?"
output = selector.select_examples({"question": short_question})
assert output == EXAMPLES
def test_selector_add_example(selector: LengthBasedExampleSelector) -> None:
"""Test LengthBasedExampleSelector can add an example."""
new_example = {"question": "Question: what are you?\nAnswer: bar"}
selector.add_example(new_example)
short_question = "Short question?"
output = selector.select_examples({"question": short_question})
assert output == EXAMPLES + [new_example]
def test_selector_trims_one_example(selector: LengthBasedExampleSelector) -> None:
"""Test LengthBasedExampleSelector can trim one example."""
long_question = """I am writing a really long question,
this probably is going to affect the example right?"""
output = selector.select_examples({"question": long_question})
assert output == EXAMPLES[:1]
def test_selector_trims_all_examples(
selector: LengthBasedExampleSelector,
) -> None:
"""Test LengthBasedExampleSelector can trim all examples."""
longest_question = """This question is super super super,
super super super super super super super super super super super,
super super super super long, this will affect the example right?"""
output = selector.select_examples({"question": longest_question})
assert output == []
|
"""Test functionality related to length based selector."""
import pytest
from langchain_core.example_selectors import (
LengthBasedExampleSelector,
)
from langchain_core.prompts import PromptTemplate
EXAMPLES = [
{"question": "Question: who are you?\nAnswer: foo"},
{"question": "Question: who are you?\nAnswer: foo"},
]
@pytest.fixture
def selector() -> LengthBasedExampleSelector:
"""Get length based selector to use in tests."""
prompts = PromptTemplate(input_variables=["question"], template="{question}")
selector = LengthBasedExampleSelector(
examples=EXAMPLES,
example_prompt=prompts,
max_length=30,
)
return selector
def test_selector_valid(selector: LengthBasedExampleSelector) -> None:
"""Test LengthBasedExampleSelector can select examples.."""
short_question = "Short question?"
output = selector.select_examples({"question": short_question})
assert output == EXAMPLES
def test_selector_add_example(selector: LengthBasedExampleSelector) -> None:
"""Test LengthBasedExampleSelector can add an example."""
new_example = {"question": "Question: what are you?\nAnswer: bar"}
selector.add_example(new_example)
short_question = "Short question?"
output = selector.select_examples({"question": short_question})
assert output == EXAMPLES + [new_example]
def test_selector_trims_one_example(selector: LengthBasedExampleSelector) -> None:
"""Test LengthBasedExampleSelector can trim one example."""
long_question = """I am writing a really long question,
this probably is going to affect the example right?"""
output = selector.select_examples({"question": long_question})
assert output == EXAMPLES[:1]
def test_selector_trims_all_examples(
selector: LengthBasedExampleSelector,
) -> None:
"""Test LengthBasedExampleSelector can trim all examples."""
longest_question = """This question is super super super,
super super super super super super super super super super super,
super super super super long, this will affect the example right?"""
output = selector.select_examples({"question": longest_question})
assert output == []
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will also find sentences that are less similar.
A second parameter is 'min_community_size': Only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast; clustering 50k sentences requires only 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import time
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_community_size: Only consider clusters that have at least 25 elements
# threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i + 1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will also find sentences that are less similar.
A second parameter is 'min_community_size': Only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast; clustering 50k sentences requires only 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import time
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_community_size: Only consider clusters that have at least 25 elements
# threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i + 1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(RetinaNet,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained, init_cfg, img_norm_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(RetinaNet,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained, init_cfg, img_norm_cfg)
|
"""Feishu docs reader."""
import json
import os
import time
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copyright (2023) Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FeishuDocsReader(BaseReader):
"""
Feishu Docs reader.
Reads a page from Feishu Docs
"""
host = "https://open.feishu.cn"
documents_raw_content_url_path = "/open-apis/docx/v1/documents/{}/raw_content"
tenant_access_token_internal_url_path = (
"/open-apis/auth/v3/tenant_access_token/internal"
)
def __init__(self, app_id, app_secret) -> None:
"""
Args:
app_id: The unique identifier of the application, obtained after the application is created.
app_secret: Application key, obtained after creating the application.
"""
super().__init__()
self.app_id = app_id
self.app_secret = app_secret
self.tenant_access_token = ""
self.expire = 0
def load_data(self, document_ids: List[str]) -> List[Document]:
"""
Load data for the given Feishu document ids.
Args:
document_ids (List[str]): a list of document ids.
"""
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for document_id in document_ids:
doc = self._load_doc(document_id)
results.append(Document(text=doc, extra_info={"document_id": document_id}))
return results
def _load_doc(self, document_id) -> str:
"""
Load a document from Feishu Docs.
Args:
document_id: the document id.
Returns:
The document text.
"""
url = self.host + self.documents_raw_content_url_path.format(document_id)
if self.tenant_access_token == "" or self.expire < time.time():
self._update_tenant_access_token()
headers = {
"Authorization": f"Bearer {self.tenant_access_token}",
"Content-Type": "application/json; charset=utf-8",
}
response = requests.get(url, headers=headers)
return response.json()["data"]["content"]
def _update_tenant_access_token(self):
"""For update tenant_access_token."""
url = self.host + self.tenant_access_token_internal_url_path
headers = {"Content-Type": "application/json; charset=utf-8"}
data = {"app_id": self.app_id, "app_secret": self.app_secret}
response = requests.post(url, data=json.dumps(data), headers=headers)
self.tenant_access_token = response.json()["tenant_access_token"]
self.expire = time.time() + response.json()["expire"]
def set_lark_domain(self):
"""The default API endpoints are for Feishu, in order to switch to Lark, we should use set_lark_domain."""
self.host = "https://open.larksuite.com"
if __name__ == "__main__":
app_id = os.environ.get("FEISHU_APP_ID")
app_secret = os.environ.get("FEISHU_APP_SECRET")
reader = FeishuDocsReader(app_id, app_secret)
print(reader.load_data(document_ids=[os.environ.get("FEISHU_DOC_ID")]))
|
"""Feishu docs reader."""
import json
import os
import time
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copyright (2023) Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FeishuDocsReader(BaseReader):
"""
Feishu Docs reader.
Reads a page from Feishu Docs
"""
host = "https://open.feishu.cn"
documents_raw_content_url_path = "/open-apis/docx/v1/documents/{}/raw_content"
tenant_access_token_internal_url_path = (
"/open-apis/auth/v3/tenant_access_token/internal"
)
def __init__(self, app_id, app_secret) -> None:
"""
Args:
app_id: The unique identifier of the application, obtained after the application is created.
app_secret: Application key, obtained after creating the application.
"""
super().__init__()
self.app_id = app_id
self.app_secret = app_secret
self.tenant_access_token = ""
self.expire = 0
def load_data(self, document_ids: List[str]) -> List[Document]:
"""
Load data for the given Feishu document ids.
Args:
document_ids (List[str]): a list of document ids.
"""
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for document_id in document_ids:
doc = self._load_doc(document_id)
results.append(Document(text=doc, extra_info={"document_id": document_id}))
return results
def _load_doc(self, document_id) -> str:
"""
Load a document from Feishu Docs.
Args:
document_id: the document id.
Returns:
The document text.
"""
url = self.host + self.documents_raw_content_url_path.format(document_id)
if self.tenant_access_token == "" or self.expire < time.time():
self._update_tenant_access_token()
headers = {
"Authorization": f"Bearer {self.tenant_access_token}",
"Content-Type": "application/json; charset=utf-8",
}
response = requests.get(url, headers=headers)
return response.json()["data"]["content"]
def _update_tenant_access_token(self):
"""For update tenant_access_token."""
url = self.host + self.tenant_access_token_internal_url_path
headers = {"Content-Type": "application/json; charset=utf-8"}
data = {"app_id": self.app_id, "app_secret": self.app_secret}
response = requests.post(url, data=json.dumps(data), headers=headers)
self.tenant_access_token = response.json()["tenant_access_token"]
self.expire = time.time() + response.json()["expire"]
def set_lark_domain(self):
"""The default API endpoints are for Feishu, in order to switch to Lark, we should use set_lark_domain."""
self.host = "https://open.larksuite.com"
if __name__ == "__main__":
app_id = os.environ.get("FEISHU_APP_ID")
app_secret = os.environ.get("FEISHU_APP_SECRET")
reader = FeishuDocsReader(app_id, app_secret)
print(reader.load_data(document_ids=[os.environ.get("FEISHU_DOC_ID")]))
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
The margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseTripletLoss(model), lambda_corpus=3e-5, lambda_query=5e-5)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
|
#!/usr/bin/env python3
"""Generate feature statistics for training set.
Example:
python global_stats.py --model-type librispeech --dataset-path /home/librispeech
"""
import json
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from common import (
MODEL_TYPE_LIBRISPEECH,
MODEL_TYPE_TEDLIUM3,
MODEL_TYPE_MUSTC,
piecewise_linear_log,
spectrogram_transform,
)
from must.dataset import MUSTC
logger = logging.getLogger()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--model-type", type=str, choices=[MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC], required=True
)
parser.add_argument(
"--dataset-path",
required=True,
type=pathlib.Path,
help="Path to dataset. "
"For LibriSpeech, all of 'train-clean-360', 'train-clean-100', and 'train-other-500' must exist.",
)
parser.add_argument(
"--output-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="File to save feature statistics to. (Default: './global_stats.json')",
)
return parser.parse_args()
def generate_statistics(samples):
E_x = 0
E_x_2 = 0
N = 0
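# streaming update of E[x] and E[x^2] over all spectrogram frames; the standard deviation is derived from them at the end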
for idx, sample in enumerate(samples):
mel_spec = spectrogram_transform(sample[0].squeeze()).transpose(1, 0)
scaled_mel_spec = piecewise_linear_log(mel_spec)
sum = scaled_mel_spec.sum(0)
sq_sum = scaled_mel_spec.pow(2).sum(0)
M = scaled_mel_spec.size(0)
E_x = E_x * (N / (N + M)) + sum / (N + M)
E_x_2 = E_x_2 * (N / (N + M)) + sq_sum / (N + M)
N += M
if idx % 100 == 0:
logger.info(f"Processed {idx}")
return E_x, (E_x_2 - E_x**2) ** 0.5
def get_dataset(args):
if args.model_type == MODEL_TYPE_LIBRISPEECH:
return torch.utils.data.ConcatDataset(
[
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-clean-360"),
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-clean-100"),
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-other-500"),
]
)
elif args.model_type == MODEL_TYPE_TEDLIUM3:
return torchaudio.datasets.TEDLIUM(args.dataset_path, release="release3", subset="train")
elif args.model_type == MODEL_TYPE_MUSTC:
return MUSTC(args.dataset_path, subset="train")
else:
raise ValueError(f"Encountered unsupported model type {args.model_type}.")
def cli_main():
args = parse_args()
dataset = get_dataset(args)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=4)
mean, stddev = generate_statistics(iter(dataloader))
json_str = json.dumps({"mean": mean.tolist(), "invstddev": (1 / stddev).tolist()}, indent=2)
with open(args.output_path, "w") as f:
f.write(json_str)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
"""Generate feature statistics for training set.
Example:
python global_stats.py --model-type librispeech --dataset-path /home/librispeech
"""
import json
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from common import (
MODEL_TYPE_LIBRISPEECH,
MODEL_TYPE_TEDLIUM3,
MODEL_TYPE_MUSTC,
piecewise_linear_log,
spectrogram_transform,
)
from must.dataset import MUSTC
logger = logging.getLogger()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--model-type", type=str, choices=[MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC], required=True
)
parser.add_argument(
"--dataset-path",
required=True,
type=pathlib.Path,
help="Path to dataset. "
"For LibriSpeech, all of 'train-clean-360', 'train-clean-100', and 'train-other-500' must exist.",
)
parser.add_argument(
"--output-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="File to save feature statistics to. (Default: './global_stats.json')",
)
return parser.parse_args()
def generate_statistics(samples):
E_x = 0
E_x_2 = 0
N = 0
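# streaming update of E[x] and E[x^2] over all spectrogram frames; the standard deviation is derived from them at the end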
for idx, sample in enumerate(samples):
mel_spec = spectrogram_transform(sample[0].squeeze()).transpose(1, 0)
scaled_mel_spec = piecewise_linear_log(mel_spec)
sum = scaled_mel_spec.sum(0)
sq_sum = scaled_mel_spec.pow(2).sum(0)
M = scaled_mel_spec.size(0)
E_x = E_x * (N / (N + M)) + sum / (N + M)
E_x_2 = E_x_2 * (N / (N + M)) + sq_sum / (N + M)
N += M
if idx % 100 == 0:
logger.info(f"Processed {idx}")
return E_x, (E_x_2 - E_x ** 2) ** 0.5
def get_dataset(args):
if args.model_type == MODEL_TYPE_LIBRISPEECH:
return torch.utils.data.ConcatDataset(
[
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-clean-360"),
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-clean-100"),
torchaudio.datasets.LIBRISPEECH(args.dataset_path, url="train-other-500"),
]
)
elif args.model_type == MODEL_TYPE_TEDLIUM3:
return torchaudio.datasets.TEDLIUM(args.dataset_path, release="release3", subset="train")
elif args.model_type == MODEL_TYPE_MUSTC:
return MUSTC(args.dataset_path, subset="train")
else:
raise ValueError(f"Encountered unsupported model type {args.model_type}.")
def cli_main():
args = parse_args()
dataset = get_dataset(args)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=4)
mean, stddev = generate_statistics(iter(dataloader))
json_str = json.dumps({"mean": mean.tolist(), "invstddev": (1 / stddev).tolist()}, indent=2)
with open(args.output_path, "w") as f:
f.write(json_str)
if __name__ == "__main__":
cli_main()
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives)
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor:
if embeddings_type == "query":
embeddings_to_use = embeddings[0] # (batch_size, embedding_dim)
else:
embeddings_to_use = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim)
if self.threshold is not None:
l0_norm = (embeddings_to_use != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings_to_use = embeddings_to_use * mask.unsqueeze(1)
        return torch.sum(torch.mean(embeddings_to_use, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives)
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor:
if embeddings_type == "query":
embeddings_to_use = embeddings[0] # (batch_size, embedding_dim)
else:
embeddings_to_use = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim)
        if self.threshold is not None:
l0_norm = (embeddings_to_use != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings_to_use = embeddings_to_use * mask.unsqueeze(1)
        return torch.sum(torch.mean(embeddings_to_use, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
import json
import logging
import os
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: list[str], word_weights: dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
            word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
                by this float value. Tokens in word_weights do not have to match the vocab exactly (it may contain more or fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super(WordWeights, self).__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
"{} of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
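# Editor's illustrative sketch (hypothetical helper, not part of the module): a toy forward
# pass showing the effect of the weighting above, assuming a three-token vocabulary where
# only "world" has an explicit (idf-style) weight.
def _word_weights_toy_example() -> dict[str, Tensor]:
    module = WordWeights(vocab=["[PAD]", "hello", "world"], word_weights={"world": 2.0}, unknown_word_weight=1.0)
    features = {
        "input_ids": torch.tensor([[1, 2, 0]]),
        "attention_mask": torch.tensor([[1, 1, 0]]),
        "token_embeddings": torch.ones(1, 3, 4),
    }
    # "world" embeddings are scaled by 2.0, "hello" by 1.0, and the padded position by 0.
    return module(features)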
|
import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: List[str], word_weights: Dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
            word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
                by this float value. Tokens in word_weights do not have to match the vocab exactly (it may contain more or fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super(WordWeights, self).__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
"{} of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: Dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
This loss is typically used within the :class:`CSRLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.beta = beta
def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSRReconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
Dictionary containing the total loss and individual loss components
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
This loss is typically used within the :class:`CSRLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.beta = beta
def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSRReconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
Dictionary containing the total loss and individual loss components
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expects a tab separated file with the first & second column the sentence pair and third column the score (0...1). Default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with (
gzip.open(filepath, "rt", encoding="utf8")
if filename.endswith(".gz")
else open(filepath, encoding="utf-8")
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
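# Editor's illustrative note (hypothetical helper, not part of the original module): with the
# default min_score=0 and max_score=5, get_examples maps a raw STS score s to
# (s - min_score) / (max_score - min_score), so a gold score of 4.0 becomes a label of 0.8.
def _normalize_sts_score(score: float, min_score: float = 0.0, max_score: float = 5.0) -> float:
    return (score - min_score) / (max_score - min_score)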
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expects a tab separated file with the first & second column the sentence pair and third column the score (0...1). Default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with (
gzip.open(filepath, "rt", encoding="utf8")
if filename.endswith(".gz")
            else open(filepath, encoding="utf-8")
        ) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
        draw_gt (bool): Whether to draw the ground truth. Defaults to True.
draw_pred (bool): Whether to draw the predicted result.
            Defaults to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
        draw_gt (bool): Whether to draw the ground truth. Defaults to True.
draw_pred (bool): Whether to draw the predicted result.
            Defaults to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
from collections import ChainMap
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
MutableMapping,
Optional,
Type,
TypeVar,
Union,
)
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.doc_vec.doc_vec import DocVec
IndexIterType = Union[slice, Iterable[int], Iterable[bool], None]
T = TypeVar('T', bound='ColumnStorage')
class ColumnStorage:
"""
ColumnStorage is a container to store the columns of the
:class:`~docarray.array.doc_vec.DocVec`.
:param tensor_columns: a Dict of AbstractTensor
:param doc_columns: a Dict of :class:`~docarray.array.doc_vec.DocVec`
:param docs_vec_columns: a Dict of List of :class:`~docarray.array.doc_vec.DocVec`
:param any_columns: a Dict of List
:param tensor_type: Class used to wrap the doc_vec tensors
"""
def __init__(
self,
tensor_columns: Dict[str, Optional[AbstractTensor]],
doc_columns: Dict[str, Optional['DocVec']],
docs_vec_columns: Dict[str, Optional[ListAdvancedIndexing['DocVec']]],
any_columns: Dict[str, ListAdvancedIndexing],
tensor_type: Type[AbstractTensor] = NdArray,
):
self.tensor_columns = tensor_columns
self.doc_columns = doc_columns
self.docs_vec_columns = docs_vec_columns
self.any_columns = any_columns
self.tensor_type = tensor_type
self.columns = ChainMap( # type: ignore
self.tensor_columns, # type: ignore
self.doc_columns, # type: ignore
self.docs_vec_columns, # type: ignore
self.any_columns, # type: ignore
) # type: ignore
def __len__(self) -> int:
        return len(self.any_columns['id'])  # TODO: what if IDs are None?
def __getitem__(self: T, item: IndexIterType) -> T:
if isinstance(item, tuple):
item = list(item)
tensor_columns = {
key: col[item] if col is not None else None
for key, col in self.tensor_columns.items()
}
doc_columns = {
key: col[item] if col is not None else None
for key, col in self.doc_columns.items()
}
docs_vec_columns = {
key: col[item] if col is not None else None
for key, col in self.docs_vec_columns.items()
}
any_columns = {
key: col[item] if col is not None else None
for key, col in self.any_columns.items()
}
return self.__class__(
tensor_columns,
doc_columns,
docs_vec_columns,
any_columns,
self.tensor_type,
)
class ColumnStorageView(dict, MutableMapping[str, Any]):
index: int
storage: ColumnStorage
def __init__(self, index: int, storage: ColumnStorage):
super().__init__()
self.index = index
self.storage = storage
def __getitem__(self, name: str) -> Any:
if name in self.storage.tensor_columns.keys():
tensor = self.storage.tensor_columns[name]
if tensor is None:
return None
if tensor.get_comp_backend().n_dim(tensor) == 1:
                # to ensure consistency between numpy and pytorch
                # we wrap the scalar in a tensor of ndim = 1
                # otherwise numpy passes by value whereas torch passes by reference
col = self.storage.tensor_columns[name]
if col is not None:
return col[self.index : self.index + 1]
else:
return None
col = self.storage.columns[name]
if col is None:
return None
return col[self.index]
def __setitem__(self, name, value) -> None:
if self.storage.columns[name] is None:
raise ValueError(
                f'Cannot set an item to a None column. This means that '
                f'the DocVec that encapsulates this doc has the field '
                f'{name} set to None. If you want to modify that you need to do it at the '
f'DocVec level. `docs.field = np.zeros(10)`'
)
self.storage.columns[name][self.index] = value
def __delitem__(self, key):
raise RuntimeError('Cannot delete an item from a StorageView')
def __iter__(self):
return self.storage.columns.keys()
def __len__(self):
return len(self.storage.columns)
def keys(self):
return self.storage.columns.keys()
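# Editor's illustrative sketch (stdlib only, hypothetical helper, not part of docarray): the
# ChainMap built in ColumnStorage.__init__ resolves a key from whichever of the four column
# dicts defines it, without copying them into a single dict.
def _chainmap_lookup_sketch() -> None:
    tensor_cols = {'embedding': [0.1, 0.2]}
    any_cols = {'id': ['a', 'b']}
    columns = ChainMap(tensor_cols, any_cols)
    assert columns['embedding'] == [0.1, 0.2]
    assert columns['id'] == ['a', 'b']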
|
from collections import ChainMap
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
MutableMapping,
Optional,
Type,
TypeVar,
Union,
)
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.doc_vec.doc_vec import DocVec
IndexIterType = Union[slice, Iterable[int], Iterable[bool], None]
T = TypeVar('T', bound='ColumnStorage')
class ColumnStorage:
"""
ColumnStorage is a container to store the columns of the
:class:`~docarray.array.doc_vec.DocVec`.
:param tensor_columns: a Dict of AbstractTensor
:param doc_columns: a Dict of :class:`~docarray.array.doc_vec.DocVec`
:param docs_vec_columns: a Dict of List of :class:`~docarray.array.doc_vec.DocVec`
:param any_columns: a Dict of List
:param tensor_type: Class used to wrap the doc_vec tensors
"""
def __init__(
self,
tensor_columns: Dict[str, Optional[AbstractTensor]],
doc_columns: Dict[str, Optional['DocVec']],
docs_vec_columns: Dict[str, Optional[ListAdvancedIndexing['DocVec']]],
any_columns: Dict[str, ListAdvancedIndexing],
tensor_type: Type[AbstractTensor] = NdArray,
):
self.tensor_columns = tensor_columns
self.doc_columns = doc_columns
self.docs_vec_columns = docs_vec_columns
self.any_columns = any_columns
self.tensor_type = tensor_type
self.columns = ChainMap( # type: ignore
self.tensor_columns, # type: ignore
self.doc_columns, # type: ignore
self.docs_vec_columns, # type: ignore
self.any_columns, # type: ignore
) # type: ignore
def __len__(self) -> int:
        return len(self.any_columns['id'])  # TODO: what if IDs are None?
def __getitem__(self: T, item: IndexIterType) -> T:
if isinstance(item, tuple):
item = list(item)
tensor_columns = {
key: col[item] if col is not None else None
for key, col in self.tensor_columns.items()
}
doc_columns = {
key: col[item] if col is not None else None
for key, col in self.doc_columns.items()
}
docs_vec_columns = {
key: col[item] if col is not None else None
for key, col in self.docs_vec_columns.items()
}
any_columns = {
key: col[item] if col is not None else None
for key, col in self.any_columns.items()
}
return self.__class__(
tensor_columns,
doc_columns,
docs_vec_columns,
any_columns,
self.tensor_type,
)
class ColumnStorageView(dict, MutableMapping[str, Any]):
index: int
storage: ColumnStorage
def __init__(self, index: int, storage: ColumnStorage):
super().__init__()
self.index = index
self.storage = storage
def __getitem__(self, name: str) -> Any:
if name in self.storage.tensor_columns.keys():
tensor = self.storage.tensor_columns[name]
if tensor is None:
return None
if tensor.get_comp_backend().n_dim(tensor) == 1:
                # to ensure consistency between numpy and pytorch
                # we wrap the scalar in a tensor of ndim = 1
                # otherwise numpy passes by value whereas torch passes by reference
col = self.storage.tensor_columns[name]
if col is not None:
return col[self.index : self.index + 1]
else:
return None
col = self.storage.columns[name]
if col is None:
return None
return col[self.index]
def __setitem__(self, name, value) -> None:
if self.storage.columns[name] is None:
raise ValueError(
                f'Cannot set an item to a None column. This means that '
                f'the DocVec that encapsulates this doc has the field '
                f'{name} set to None. If you want to modify that you need to do it at the '
f'DocVec level. `docs.field = np.zeros(10)`'
)
self.storage.columns[name][self.index] = value
def __delitem__(self, key):
raise RuntimeError('Cannot delete an item from a StorageView')
def __iter__(self):
return self.storage.columns.keys()
def __len__(self):
return len(self.storage.columns)
|
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
|
import pathlib
from typing import Any, Optional, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> list[OnlineResource]:
rsrcs: list[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: tuple[tuple[str, Any], dict[str, Any]]) -> dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_boxes = BoundingBoxes(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_boxes": bounding_boxes,
}
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
        # The order of the image files in the .zip archives perfectly matches the order of the entries in the
        # (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
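# Editor's illustrative note (hypothetical helper, not part of torchvision): mirrors the
# suffix routing performed by _classify_train_archive above, which sends .ppm entries to the
# image branch (0) and .csv entries to the annotation branch (1) of the Demultiplexer.
def _route_by_suffix(filename: str) -> Optional[int]:
    suffix = pathlib.Path(filename).suffix
    return {".ppm": 0, ".csv": 1}.get(suffix)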
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_boxes = BoundingBoxes(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_boxes": bounding_boxes,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
        # The order of the image files in the .zip archives perfectly matches the order of the entries in the
        # (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
default_ef: int = 50
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.other == 5
assert index._db_config.work_dir == '.'
assert index._runtime_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}, default_ef=10))
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}))
assert index._runtime_config.default_column_config == {}
def test_set_by_kwargs():
# change all settings
index = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={}, default_ef=10)
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](work_dir='hi')
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={})
assert index._runtime_config.default_column_config == {}
def test_default_column_config():
index = DummyDocIndex[SimpleDoc]()
assert index._runtime_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
default_ef: int = 50
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
store = DummyDocIndex[SimpleDoc]()
assert store._db_config.other == 5
assert store._db_config.work_dir == '.'
assert store._runtime_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
store = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert store._db_config.other == 10
assert store._db_config.work_dir == 'hi'
store.configure(RuntimeConfig(default_column_config={}, default_ef=10))
assert store._runtime_config.default_column_config == {}
# change only some settings
store = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert store._db_config.other == 5
assert store._db_config.work_dir == 'hi'
store.configure(RuntimeConfig(default_column_config={}))
assert store._runtime_config.default_column_config == {}
def test_set_by_kwargs():
# change all settings
store = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert store._db_config.other == 10
assert store._db_config.work_dir == 'hi'
store.configure(default_column_config={}, default_ef=10)
assert store._runtime_config.default_column_config == {}
# change only some settings
store = DummyDocIndex[SimpleDoc](work_dir='hi')
assert store._db_config.other == 5
assert store._db_config.work_dir == 'hi'
store.configure(default_column_config={})
assert store._runtime_config.default_column_config == {}
def test_default_column_config():
store = DummyDocIndex[SimpleDoc]()
assert store._runtime_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import layers
from keras.src import testing
class MelSpectrogramTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_mel_spectrogram_basics(self):
self.run_layer_test(
layers.MelSpectrogram,
init_kwargs={
"num_mel_bins": 80,
"sampling_rate": 8000,
"sequence_stride": 128,
"fft_length": 2048,
},
input_shape=(2, 16000),
expected_output_shape=(2, 80, 126),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.MelSpectrogram,
init_kwargs={
"num_mel_bins": 80,
"sampling_rate": 8000,
"sequence_stride": 128,
"fft_length": 2048,
},
input_shape=(16000,),
expected_output_shape=(80, 126),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
[
((2, 16000), 80, 128, 2048, 8000, False),
((16000,), 80, 128, 2048, 8000, False),
((2, 16001), 80, 128, 2048, 16000, False),
((16001,), 80, 128, 2048, 8000, False),
((2, 8000), 128, 64, 512, 32000, False),
((8000,), 128, 64, 512, 32000, False),
((2, 8000), 128, 64, 512, 32000, True),
((8000,), 128, 64, 512, 32000, True),
]
)
def test_output_shape(
self,
input_shape,
num_mel_bins,
sequence_stride,
fft_length,
sampling_rate,
all_zero,
):
if all_zero:
audios = np.zeros(input_shape)
else:
audios = np.random.random(input_shape)
out = layers.MelSpectrogram(
num_mel_bins=num_mel_bins,
sequence_stride=sequence_stride,
fft_length=fft_length,
sampling_rate=sampling_rate,
)(audios)
if len(input_shape) == 1:
ref_shape = (
num_mel_bins,
(input_shape[0] + sequence_stride + 1) // sequence_stride,
)
else:
ref_shape = (
input_shape[0],
num_mel_bins,
(input_shape[1] + sequence_stride + 1) // sequence_stride,
)
self.assertEqual(tuple(out.shape), ref_shape)
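    # Editor's note (illustrative, not from the original test): the expected frame count used
    # above is (num_samples + sequence_stride + 1) // sequence_stride; for the basic case of
    # 16000 samples with stride 128 that is (16000 + 128 + 1) // 128 = 126, matching the 126
    # in expected_output_shape for the run_layer_test calls.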
def test_tf_data_compatibility(self):
input_shape = (2, 16000)
output_shape = (2, 80, 126)
layer = layers.MelSpectrogram(
num_mel_bins=80,
sampling_rate=8000,
sequence_stride=128,
fft_length=2048,
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
|
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import layers
from keras.src import testing
class MelSpectrogramTest(testing.TestCase, parameterized.TestCase):
@pytest.mark.requires_trainable_backend
def test_mel_spectrogram_basics(self):
self.run_layer_test(
layers.MelSpectrogram,
init_kwargs={
"num_mel_bins": 80,
"sampling_rate": 8000,
"sequence_stride": 128,
"fft_length": 2048,
},
input_shape=(2, 16000),
expected_output_shape=(2, 80, 126),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.MelSpectrogram,
init_kwargs={
"num_mel_bins": 80,
"sampling_rate": 8000,
"sequence_stride": 128,
"fft_length": 2048,
},
input_shape=(16000,),
expected_output_shape=(80, 126),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
[
((2, 16000), 80, 128, 2048, 8000, False),
((16000,), 80, 128, 2048, 8000, False),
((2, 16001), 80, 128, 2048, 16000, False),
((16001,), 80, 128, 2048, 8000, False),
((2, 8000), 128, 64, 512, 32000, False),
((8000,), 128, 64, 512, 32000, False),
((2, 8000), 128, 64, 512, 32000, True),
((8000,), 128, 64, 512, 32000, True),
]
)
def test_output_shape(
self,
input_shape,
num_mel_bins,
sequence_stride,
fft_length,
sampling_rate,
all_zero,
):
if all_zero:
audios = np.zeros(input_shape)
else:
audios = np.random.random(input_shape)
out = layers.MelSpectrogram(
num_mel_bins=num_mel_bins,
sequence_stride=sequence_stride,
fft_length=fft_length,
sampling_rate=sampling_rate,
)(audios)
if len(input_shape) == 1:
ref_shape = (
num_mel_bins,
(input_shape[0] + sequence_stride + 1) // sequence_stride,
)
else:
ref_shape = (
input_shape[0],
num_mel_bins,
(input_shape[1] + sequence_stride + 1) // sequence_stride,
)
self.assertEqual(tuple(out.shape), ref_shape)
def test_tf_data_compatibility(self):
input_shape = (2, 16000)
output_shape = (2, 80, 126)
layer = layers.MelSpectrogram(
num_mel_bins=80,
sampling_rate=8000,
sequence_stride=128,
fft_length=2048,
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
|
from llama_index.llms.bedrock.base import (
Bedrock,
completion_response_to_chat_response,
completion_with_retry,
)
from llama_index.llms.bedrock.utils import ProviderType
__all__ = [
"Bedrock",
"completion_with_retry",
"completion_response_to_chat_response",
"ProviderType",
]
|
from llama_index.llms.bedrock.base import (
Bedrock,
completion_response_to_chat_response,
completion_with_retry,
)
__all__ = ["Bedrock", "completion_with_retry", "completion_response_to_chat_response"]
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
        help='The UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_deployment_parser(parser, title='Base Deployment'):
"""Mixing in arguments required by a deployment into the given parser.
The Deployment doesn't have scalable features like shards, replicas and polling
:param parser: the parser instance to which we add arguments
:param title: the title of the create args group
:return: returns the created arg group
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title=title)
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
gp.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
return gp
def mixin_scalable_deployment_parser(parser):
"""Mixing in arguments required by a scalable deployment into the given parser.
The deployment is scalable and can have shards, replicas and polling
:param parser: the parser instance to which we add arguments
"""
gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
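# Hedged sketch (illustration only, not part of the module above): wiring the
# scalable-deployment mixin into a plain argparse parser. The CLI values below
# are made up for the example.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='scalable deployment args')
    mixin_scalable_deployment_parser(parser)
    args = parser.parse_args(['--name', 'encoder', '--shards', '2', '--replicas', '3'])
    assert (args.shards, args.replicas) == (2, 3)
    assert args.polling == PollingType.ANY.name  # default polling strategy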
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
        help='the UUID for identifying the workspace. When not given a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_deployment_parser(parser, title='Base Deployment'):
"""Mixing in arguments required by a deployment into the given parser.
The Deployment doesn't have scalable features like shards, replicas and polling
:param parser: the parser instance to which we add arguments
:param title: the title of the create args group
:return: returns the created arg group
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title=title)
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
gp.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
return gp
def mixin_scalable_deployment_parser(parser):
"""Mixing in arguments required by a scalable deployment into the given parser.
The deployment is scalable and can have shards, replicas and polling
:param parser: the parser instance to which we add arguments
"""
gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
|
from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class InvoiceParsingInput(BaseModel):
query: HttpUrl = Field(description="url of the document to parse")
class EdenAiParsingInvoiceTool(EdenaiTool):
"""Tool that queries the Eden AI Invoice parsing API.
    For API reference, check the Eden AI documentation:
https://docs.edenai.co/reference/ocr_invoice_parser_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name: str = "edenai_invoice_parsing"
description: str = (
"A wrapper around edenai Services invoice parsing. "
"""Useful for when you have to extract information from
        an image. It can take invoices
        in a variety of formats and returns the data they contain
(items, prices, addresses, vendor name, etc.)
in a structured format to automate the invoice processing """
"Input should be the string url of the document to parse."
)
args_schema: Type[BaseModel] = InvoiceParsingInput
language: Optional[str] = None
"""
language of the image passed to the model.
"""
feature: str = "ocr"
subfeature: str = "invoice_parser"
def _parse_response(self, response: list) -> str:
formatted_list: list = []
if len(response) == 1:
self._parse_json_multilevel(
response[0]["extracted_data"][0], formatted_list
)
else:
for entry in response:
if entry.get("provider") == "eden-ai":
self._parse_json_multilevel(
entry["extracted_data"][0], formatted_list
)
return "\n".join(formatted_list)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {
"file_url": query,
"language": self.language,
"attributes_as_list": False,
}
return self._call_eden_ai(query_params)
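# Hedged sketch (illustration only, not part of the module above): one way this
# tool might be instantiated and invoked. The URL, the provider name and the
# EDENAI_API_KEY value are placeholders; a real key and a reachable document
# are required for an actual call.
if __name__ == "__main__":  # pragma: no cover
    import os
    os.environ.setdefault("EDENAI_API_KEY", "<your-api-key>")
    tool = EdenAiParsingInvoiceTool(providers=["amazon"], language="en")
    # `providers` is inherited from EdenaiTool; "amazon" is just an example value.
    print(tool.invoke("https://example.com/invoice.pdf"))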
|
from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class InvoiceParsingInput(BaseModel):
query: HttpUrl = Field(description="url of the document to parse")
class EdenAiParsingInvoiceTool(EdenaiTool): # type: ignore[override, override, override]
"""Tool that queries the Eden AI Invoice parsing API.
    For API reference, check the Eden AI documentation:
https://docs.edenai.co/reference/ocr_invoice_parser_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name: str = "edenai_invoice_parsing"
description: str = (
"A wrapper around edenai Services invoice parsing. "
"""Useful for when you have to extract information from
        an image. It can take invoices
        in a variety of formats and returns the data they contain
(items, prices, addresses, vendor name, etc.)
in a structured format to automate the invoice processing """
"Input should be the string url of the document to parse."
)
args_schema: Type[BaseModel] = InvoiceParsingInput
language: Optional[str] = None
"""
language of the image passed to the model.
"""
feature: str = "ocr"
subfeature: str = "invoice_parser"
def _parse_response(self, response: list) -> str:
formatted_list: list = []
if len(response) == 1:
self._parse_json_multilevel(
response[0]["extracted_data"][0], formatted_list
)
else:
for entry in response:
if entry.get("provider") == "eden-ai":
self._parse_json_multilevel(
entry["extracted_data"][0], formatted_list
)
return "\n".join(formatted_list)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {
"file_url": query,
"language": self.language,
"attributes_as_list": False,
}
return self._call_eden_ai(query_params)
|
import pathlib
from typing import Any, Callable, Optional, TypeVar, Union
from torchvision.prototype.datasets import home
from torchvision.prototype.datasets.utils import Dataset
from torchvision.prototype.utils._internal import add_suggestion
T = TypeVar("T")
D = TypeVar("D", bound=type[Dataset])
BUILTIN_INFOS: dict[str, dict[str, Any]] = {}
def register_info(name: str) -> Callable[[Callable[[], dict[str, Any]]], Callable[[], dict[str, Any]]]:
def wrapper(fn: Callable[[], dict[str, Any]]) -> Callable[[], dict[str, Any]]:
BUILTIN_INFOS[name] = fn()
return fn
return wrapper
BUILTIN_DATASETS = {}
def register_dataset(name: str) -> Callable[[D], D]:
def wrapper(dataset_cls: D) -> D:
BUILTIN_DATASETS[name] = dataset_cls
return dataset_cls
return wrapper
def list_datasets() -> list[str]:
return sorted(BUILTIN_DATASETS.keys())
def find(dct: dict[str, T], name: str) -> T:
name = name.lower()
try:
return dct[name]
except KeyError as error:
raise ValueError(
add_suggestion(
f"Unknown dataset '{name}'.",
word=name,
possibilities=dct.keys(),
alternative_hint=lambda _: (
"You can use torchvision.datasets.list_datasets() to get a list of all available datasets."
),
)
) from error
def info(name: str) -> dict[str, Any]:
return find(BUILTIN_INFOS, name)
def load(name: str, *, root: Optional[Union[str, pathlib.Path]] = None, **config: Any) -> Dataset:
dataset_cls = find(BUILTIN_DATASETS, name)
if root is None:
root = pathlib.Path(home()) / name
return dataset_cls(root, **config)
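# Hedged sketch (illustration only, not part of the module above): how the
# registration helpers are meant to be used. "toy-dataset" and its info dict
# are made-up placeholders.
@register_info("toy-dataset")
def _toy_dataset_info() -> dict[str, Any]:
    return {"categories": ["cat", "dog"]}
# `info("toy-dataset")` now returns the dict above; `load("toy-dataset")` would
# additionally require a Dataset subclass registered via
# `@register_dataset("toy-dataset")`.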
|
import pathlib
from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
from torchvision.prototype.datasets import home
from torchvision.prototype.datasets.utils import Dataset
from torchvision.prototype.utils._internal import add_suggestion
T = TypeVar("T")
D = TypeVar("D", bound=Type[Dataset])
BUILTIN_INFOS: Dict[str, Dict[str, Any]] = {}
def register_info(name: str) -> Callable[[Callable[[], Dict[str, Any]]], Callable[[], Dict[str, Any]]]:
def wrapper(fn: Callable[[], Dict[str, Any]]) -> Callable[[], Dict[str, Any]]:
BUILTIN_INFOS[name] = fn()
return fn
return wrapper
BUILTIN_DATASETS = {}
def register_dataset(name: str) -> Callable[[D], D]:
def wrapper(dataset_cls: D) -> D:
BUILTIN_DATASETS[name] = dataset_cls
return dataset_cls
return wrapper
def list_datasets() -> List[str]:
return sorted(BUILTIN_DATASETS.keys())
def find(dct: Dict[str, T], name: str) -> T:
name = name.lower()
try:
return dct[name]
except KeyError as error:
raise ValueError(
add_suggestion(
f"Unknown dataset '{name}'.",
word=name,
possibilities=dct.keys(),
alternative_hint=lambda _: (
"You can use torchvision.datasets.list_datasets() to get a list of all available datasets."
),
)
) from error
def info(name: str) -> Dict[str, Any]:
return find(BUILTIN_INFOS, name)
def load(name: str, *, root: Optional[Union[str, pathlib.Path]] = None, **config: Any) -> Dataset:
dataset_cls = find(BUILTIN_DATASETS, name)
if root is None:
root = pathlib.Path(home()) / name
return dataset_cls(root, **config)
|
import importlib
import os
from pathlib import Path
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="client",
params=[
"tutorial002",
pytest.param("tutorial002_py310", marks=needs_py310),
"tutorial002_an",
pytest.param("tutorial002_an_py39", marks=needs_py39),
pytest.param("tutorial002_an_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.background_tasks.{request.param}")
client = TestClient(mod.app)
return client
def test(client: TestClient):
log = Path("log.txt")
if log.is_file():
os.remove(log) # pragma: no cover
response = client.post("/send-notification/[email protected]?q=some-query")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Message sent"}
with open("./log.txt") as f:
assert "found query: some-query\nmessage to [email protected]" in f.read()
|
import os
from pathlib import Path
from fastapi.testclient import TestClient
from docs_src.background_tasks.tutorial002 import app
client = TestClient(app)
def test():
log = Path("log.txt")
if log.is_file():
os.remove(log) # pragma: no cover
response = client.post("/send-notification/[email protected]?q=some-query")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Message sent"}
with open("./log.txt") as f:
assert "found query: some-query\nmessage to [email protected]" in f.read()
|
"""
Tests the correct computation of evaluation scores from LabelAccuracyEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
from pathlib import Path
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer, tmp_path: Path) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model, output_path=str(tmp_path))
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
|
"""
Tests the correct computation of evaluation scores from LabelAccuracyEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model)
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
|
from abc import abstractmethod
import pytest
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.base import BaseStandardTests
class RetrieversIntegrationTests(BaseStandardTests):
"""
Base class for retrievers integration tests.
"""
@property
@abstractmethod
def retriever_constructor(self) -> type[BaseRetriever]:
"""
A BaseRetriever subclass to be tested.
"""
...
@property
def retriever_constructor_params(self) -> dict:
"""
Returns a dictionary of parameters to pass to the retriever constructor.
"""
return {}
@property
@abstractmethod
def retriever_query_example(self) -> str:
"""
Returns a str representing the "query" of an example retriever call.
"""
...
@pytest.fixture
def retriever(self) -> BaseRetriever:
"""
:private:
"""
return self.retriever_constructor(**self.retriever_constructor_params)
def test_k_constructor_param(self) -> None:
"""
Test that the retriever constructor accepts a k parameter, representing
the number of documents to return.
.. dropdown:: Troubleshooting
If this test fails, either the retriever constructor does not accept a k
parameter, or the retriever does not return the correct number of documents
(`k`) when it is set.
For example, a retriever like
.. code-block:: python
MyRetriever(k=3).invoke("query")
should return 3 documents when invoked with a query.
"""
params = {
k: v for k, v in self.retriever_constructor_params.items() if k != "k"
}
params_3 = {**params, "k": 3}
retriever_3 = self.retriever_constructor(**params_3)
result_3 = retriever_3.invoke(self.retriever_query_example)
assert len(result_3) == 3
assert all(isinstance(doc, Document) for doc in result_3)
params_1 = {**params, "k": 1}
retriever_1 = self.retriever_constructor(**params_1)
result_1 = retriever_1.invoke(self.retriever_query_example)
assert len(result_1) == 1
assert all(isinstance(doc, Document) for doc in result_1)
def test_invoke_with_k_kwarg(self, retriever: BaseRetriever) -> None:
"""
Test that the invoke method accepts a k parameter, representing the number of
documents to return.
.. dropdown:: Troubleshooting
If this test fails, the retriever's invoke method does not accept a k
parameter, or the retriever does not return the correct number of documents
(`k`) when it is set.
For example, a retriever like
.. code-block:: python
MyRetriever().invoke("query", k=3)
should return 3 documents when invoked with a query.
"""
result_1 = retriever.invoke(self.retriever_query_example, k=1)
assert len(result_1) == 1
assert all(isinstance(doc, Document) for doc in result_1)
result_3 = retriever.invoke(self.retriever_query_example, k=3)
assert len(result_3) == 3
assert all(isinstance(doc, Document) for doc in result_3)
def test_invoke_returns_documents(self, retriever: BaseRetriever) -> None:
"""
If invoked with the example params, the retriever should return a list of
Documents.
.. dropdown:: Troubleshooting
If this test fails, the retriever's invoke method does not return a list of
`langchain_core.document.Document` objects. Please confirm that your
`_get_relevant_documents` method returns a list of `Document` objects.
"""
result = retriever.invoke(self.retriever_query_example)
assert isinstance(result, list)
assert all(isinstance(doc, Document) for doc in result)
async def test_ainvoke_returns_documents(self, retriever: BaseRetriever) -> None:
"""
If ainvoked with the example params, the retriever should return a list of
Documents.
See :meth:`test_invoke_returns_documents` for more information on
troubleshooting.
"""
result = await retriever.ainvoke(self.retriever_query_example)
assert isinstance(result, list)
assert all(isinstance(doc, Document) for doc in result)
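# Hedged sketch (illustration only, not part of the suite above): a toy
# in-memory retriever and the subclass wiring these base tests expect.
# `ToyRetriever` exists only for this example; passing test_invoke_with_k_kwarg
# would additionally require the retriever to accept `k` at invoke time.
from langchain_core.callbacks import CallbackManagerForRetrieverRun
class ToyRetriever(BaseRetriever):
    k: int = 4
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        # Return `k` dummy documents derived from the query.
        return [Document(page_content=f"{query} #{i}") for i in range(self.k)]
class TestToyRetriever(RetrieversIntegrationTests):
    @property
    def retriever_constructor(self) -> type[BaseRetriever]:
        return ToyRetriever
    @property
    def retriever_constructor_params(self) -> dict:
        return {"k": 2}
    @property
    def retriever_query_example(self) -> str:
        return "what does this corpus contain?"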
|
from abc import abstractmethod
from typing import Type
import pytest
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.base import BaseStandardTests
class RetrieversIntegrationTests(BaseStandardTests):
"""
Base class for retrievers integration tests.
"""
@property
@abstractmethod
def retriever_constructor(self) -> Type[BaseRetriever]:
"""
A BaseRetriever subclass to be tested.
"""
...
@property
def retriever_constructor_params(self) -> dict:
"""
Returns a dictionary of parameters to pass to the retriever constructor.
"""
return {}
@property
@abstractmethod
def retriever_query_example(self) -> str:
"""
Returns a str representing the "query" of an example retriever call.
"""
...
@pytest.fixture
def retriever(self) -> BaseRetriever:
"""
:private:
"""
return self.retriever_constructor(**self.retriever_constructor_params)
def test_k_constructor_param(self) -> None:
"""
Test that the retriever constructor accepts a k parameter, representing
the number of documents to return.
.. dropdown:: Troubleshooting
If this test fails, either the retriever constructor does not accept a k
parameter, or the retriever does not return the correct number of documents
(`k`) when it is set.
For example, a retriever like
.. code-block:: python
MyRetriever(k=3).invoke("query")
should return 3 documents when invoked with a query.
"""
params = {
k: v for k, v in self.retriever_constructor_params.items() if k != "k"
}
params_3 = {**params, "k": 3}
retriever_3 = self.retriever_constructor(**params_3)
result_3 = retriever_3.invoke(self.retriever_query_example)
assert len(result_3) == 3
assert all(isinstance(doc, Document) for doc in result_3)
params_1 = {**params, "k": 1}
retriever_1 = self.retriever_constructor(**params_1)
result_1 = retriever_1.invoke(self.retriever_query_example)
assert len(result_1) == 1
assert all(isinstance(doc, Document) for doc in result_1)
def test_invoke_with_k_kwarg(self, retriever: BaseRetriever) -> None:
"""
Test that the invoke method accepts a k parameter, representing the number of
documents to return.
.. dropdown:: Troubleshooting
If this test fails, the retriever's invoke method does not accept a k
parameter, or the retriever does not return the correct number of documents
(`k`) when it is set.
For example, a retriever like
.. code-block:: python
MyRetriever().invoke("query", k=3)
should return 3 documents when invoked with a query.
"""
result_1 = retriever.invoke(self.retriever_query_example, k=1)
assert len(result_1) == 1
assert all(isinstance(doc, Document) for doc in result_1)
result_3 = retriever.invoke(self.retriever_query_example, k=3)
assert len(result_3) == 3
assert all(isinstance(doc, Document) for doc in result_3)
def test_invoke_returns_documents(self, retriever: BaseRetriever) -> None:
"""
If invoked with the example params, the retriever should return a list of
Documents.
.. dropdown:: Troubleshooting
If this test fails, the retriever's invoke method does not return a list of
`langchain_core.document.Document` objects. Please confirm that your
`_get_relevant_documents` method returns a list of `Document` objects.
"""
result = retriever.invoke(self.retriever_query_example)
assert isinstance(result, list)
assert all(isinstance(doc, Document) for doc in result)
async def test_ainvoke_returns_documents(self, retriever: BaseRetriever) -> None:
"""
If ainvoked with the example params, the retriever should return a list of
Documents.
See :meth:`test_invoke_returns_documents` for more information on
troubleshooting.
"""
result = await retriever.ainvoke(self.retriever_query_example)
assert isinstance(result, list)
assert all(isinstance(doc, Document) for doc in result)
|
from typing import TYPE_CHECKING, Dict, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
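# Hedged sketch (illustration only, not part of the class above): a pair of
# stub evaluators showing how `main_score_function` aggregates the collected
# scores. `_ConstantEvaluator` exists only for this illustration.
class _ConstantEvaluator(SentenceEvaluator):
    def __init__(self, score: float) -> None:
        super().__init__()
        self.score = score
    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        return self.score
if __name__ == "__main__":
    seq = SequentialEvaluator(
        [_ConstantEvaluator(0.2), _ConstantEvaluator(0.8)],
        main_score_function=lambda scores: sum(scores) / len(scores),  # mean instead of "last"
    )
    results = seq(model=None)
    assert results["sequential_score"] == 0.5
    assert results["evaluator_0"] == 0.2 and results["evaluator_1"] == 0.8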
|
from typing import TYPE_CHECKING, Dict, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluation, "primary_metric"):
scores.append(evaluation[evaluation.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList, TrackSampleList
from mmdet.utils import OptConfigType, OptMultiConfig
from .base import BaseMOTModel
@MODELS.register_module()
class ByteTrack(BaseMOTModel):
"""ByteTrack: Multi-Object Tracking by Associating Every Detection Box.
This multi object tracker is the implementation of `ByteTrack
<https://arxiv.org/abs/2110.06864>`_.
Args:
detector (dict): Configuration of detector. Defaults to None.
tracker (dict): Configuration of tracker. Defaults to None.
data_preprocessor (dict or ConfigDict, optional): The pre-process
            config of :class:`TrackDataPreprocessor`. It usually includes
``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
init_cfg (dict or list[dict]): Configuration of initialization.
Defaults to None.
"""
def __init__(self,
detector: Optional[dict] = None,
tracker: Optional[dict] = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(data_preprocessor, init_cfg)
if detector is not None:
self.detector = MODELS.build(detector)
if tracker is not None:
self.tracker = MODELS.build(tracker)
def loss(self, inputs: Tensor, data_samples: SampleList, **kwargs) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
inputs (Tensor): of shape (N, C, H, W) encoding
input images. Typically these should be mean centered and std
scaled. The N denotes batch size
data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance`.
Returns:
dict: A dictionary of loss components.
"""
return self.detector.loss(inputs, data_samples, **kwargs)
def predict(self, inputs: Dict[str, Tensor], data_samples: TrackSampleList,
**kwargs) -> TrackSampleList:
"""Predict results from a video and data samples with post-processing.
Args:
inputs (Tensor): of shape (N, T, C, H, W) encoding
input images. The N denotes batch size.
The T denotes the number of frames in a video.
data_samples (list[:obj:`TrackDataSample`]): The batch
data samples. It usually includes information such
as `video_data_samples`.
Returns:
TrackSampleList: Tracking results of the inputs.
"""
assert inputs.dim() == 5, 'The img must be 5D Tensor (N, T, C, H, W).'
assert inputs.size(0) == 1, \
'Bytetrack inference only support ' \
'1 batch size per gpu for now.'
assert len(data_samples) == 1, \
'Bytetrack inference only support 1 batch size per gpu for now.'
track_data_sample = data_samples[0]
video_len = len(track_data_sample)
for frame_id in range(video_len):
img_data_sample = track_data_sample[frame_id]
single_img = inputs[:, frame_id].contiguous()
# det_results List[DetDataSample]
det_results = self.detector.predict(single_img, [img_data_sample])
assert len(det_results) == 1, 'Batch inference is not supported.'
pred_track_instances = self.tracker.track(
data_sample=det_results[0], **kwargs)
img_data_sample.pred_track_instances = pred_track_instances
return [track_data_sample]
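# Hedged sketch (illustration only, not part of the class above): the input
# contract that `predict` asserts. Building real detector/tracker configs via
# MODELS.build is omitted; the tensor merely illustrates the (N=1, T, C, H, W)
# video layout and the single-sample batch expected in `data_samples`.
if __name__ == '__main__':
    import torch
    video = torch.rand(1, 8, 3, 640, 640)  # 1 clip, 8 frames, RGB, 640x640
    assert video.dim() == 5 and video.size(0) == 1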
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList, TrackSampleList
from mmdet.utils import OptConfigType, OptMultiConfig
from .base import BaseMOTModel
@MODELS.register_module()
class ByteTrack(BaseMOTModel):
"""ByteTrack: Multi-Object Tracking by Associating Every Detection Box.
This multi object tracker is the implementation of `ByteTrack
<https://arxiv.org/abs/2110.06864>`_.
Args:
detector (dict): Configuration of detector. Defaults to None.
tracker (dict): Configuration of tracker. Defaults to None.
data_preprocessor (dict or ConfigDict, optional): The pre-process
            config of :class:`TrackDataPreprocessor`. It usually includes
``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
init_cfg (dict or list[dict]): Configuration of initialization.
Defaults to None.
"""
def __init__(self,
detector: Optional[dict] = None,
tracker: Optional[dict] = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(data_preprocessor, init_cfg)
if detector is not None:
self.detector = MODELS.build(detector)
if tracker is not None:
self.tracker = MODELS.build(tracker)
def loss(self, inputs: Tensor, data_samples: SampleList, **kwargs) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
inputs (Tensor): of shape (N, C, H, W) encoding
input images. Typically these should be mean centered and std
scaled. The N denotes batch size
data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance`.
Returns:
dict: A dictionary of loss components.
"""
return self.detector.loss(inputs, data_samples, **kwargs)
def predict(self, inputs: Dict[str, Tensor], data_samples: TrackSampleList,
**kwargs) -> TrackSampleList:
"""Predict results from a video and data samples with post-processing.
Args:
inputs (Tensor): of shape (N, T, C, H, W) encoding
input images. The N denotes batch size.
The T denotes the number of frames in a video.
data_samples (list[:obj:`TrackDataSample`]): The batch
data samples. It usually includes information such
as `video_data_samples`.
Returns:
TrackSampleList: Tracking results of the inputs.
"""
assert inputs.dim() == 5, 'The img must be 5D Tensor (N, T, C, H, W).'
assert inputs.size(0) == 1, \
'SORT/DeepSORT inference only support ' \
'1 batch size per gpu for now.'
assert len(data_samples) == 1, \
'Bytetrack inference only support 1 batch size per gpu for now.'
track_data_sample = data_samples[0]
video_len = len(track_data_sample)
for frame_id in range(video_len):
img_data_sample = track_data_sample[frame_id]
single_img = inputs[:, frame_id].contiguous()
# det_results List[DetDataSample]
det_results = self.detector.predict(single_img, [img_data_sample])
assert len(det_results) == 1, 'Batch inference is not supported.'
pred_track_instances = self.tracker.track(
data_sample=det_results[0], **kwargs)
img_data_sample.pred_track_instances = pred_track_instances
return [track_data_sample]
|
from typing import Any, Dict, Union
import torch
from torchvision import transforms as _transforms
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return datapoints.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertDtype(Transform):
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints.TensorImageType, datapoints.TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBoxes(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `clamp_bounding_box` does not have a dispatcher function that would do that for us
output = F.clamp_bounding_box(
inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
)
return datapoints.BoundingBox.wrap_like(inpt, output)
|
from typing import Any, Dict, Union
import torch
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return datapoints.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertDtype(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints.TensorImageType, datapoints.TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBoxes(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `clamp_bounding_box` does not have a dispatcher function that would do that for us
output = F.clamp_bounding_box(
inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
)
return datapoints.BoundingBox.wrap_like(inpt, output)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_graph_metadata, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_graph_metadata, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
"""Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend.tensorflow import trainer as tf_trainer
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
class DistributeTest(testing.TestCase):
def setUp(self):
super().setUp()
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
def test_variable_creation(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
dense = layers.Dense(2)
dense.build([4, 2])
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
self.assertIn("MirroredVariable", dense.kernel.value.__class__.__name__)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(dense.bias.value, tf.distribute.DistributedValues)
self.assertIn("MirroredVariable", dense.bias.value.__class__.__name__)
def test_strategy_run(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
inputs = layers.Input(shape=[4])
dense = layers.Dense(2)
output = dense(inputs)
model = models.Functional(inputs, output)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
def input_fn(ctx):
if ctx.replica_id_in_sync_group == 1:
return tf.ones([8, 4])
else:
return tf.zeros([8, 4])
distributed_inputs = (
strategy.experimental_distribute_values_from_function(input_fn)
)
@tf.function
def run_fn(data):
return model(data)
result = strategy.run(run_fn, args=(distributed_inputs,))
self.assertIsInstance(
result, tf.types.experimental.distributed.PerReplica
)
self.assertLen(result.values, 2)
self.assertEqual(result.values[0].shape, [8, 2])
self.assertEqual(result.values[1].shape, [8, 2])
self.assertNotAllClose(result.values[0], result.values[1])
self.assertAllClose(result.values[0], tf.zeros([8, 2]))
def test_epoch_iterator(self):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
sample_weight = np.random.random((100,))
batch_size = 16
shuffle = True
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
epoch_iterator = tf_trainer.TFEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
shuffle=shuffle,
distribute_strategy=strategy,
)
steps_seen = []
for step, data_iterator in epoch_iterator:
steps_seen.append(step)
batch = next(data_iterator)
self.assertEqual(len(batch), 3)
x, y, sample_weight = batch
self.assertTrue(
isinstance(x, tf.types.experimental.distributed.PerReplica)
)
# Make sure the local batch size is 8
if step < 6:
self.assertEqual(x.values[0].shape, [8, 16])
self.assertEqual(y.values[0].shape, [8, 4])
self.assertEqual(sample_weight.values[0].shape, [8])
else:
# Last partial batch
self.assertEqual(x.values[0].shape, [2, 16])
self.assertEqual(y.values[0].shape, [2, 4])
self.assertEqual(sample_weight.values[0].shape, [2])
self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
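# Hedged sketch (illustration only, not part of the test class above): the
# batch arithmetic behind the shape assertions in test_epoch_iterator.
if __name__ == "__main__":
    num_samples, global_batch, replicas = 100, 16, 2
    full_steps, remainder = divmod(num_samples, global_batch)
    assert (full_steps, remainder) == (6, 4)  # 6 full steps + 1 partial step -> 7 steps total
    assert global_batch // replicas == 8      # local batch per replica for full steps
    assert remainder // replicas == 2         # local batch per replica for the last step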
|
"""Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend.tensorflow import trainer as tf_trainer
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
class DistributeTest(testing.TestCase):
def setUp(self):
super().setUp()
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
def test_variable_creation(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
dense = layers.Dense(2)
dense.build([4, 2])
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
self.assertIn("MirroredVariable", dense.kernel.value.__class__.__name__)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(dense.bias.value, tf.distribute.DistributedValues)
self.assertIn("MirroredVariable", dense.bias.value.__class__.__name__)
def test_strategy_run(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
inputs = layers.Input(shape=[4])
dense = layers.Dense(2)
output = dense(inputs)
model = models.Functional(inputs, output)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
def input_fn(ctx):
if ctx.replica_id_in_sync_group == 1:
return tf.ones([8, 4])
else:
return tf.zeros([8, 4])
distributed_inputs = (
strategy.experimental_distribute_values_from_function(input_fn)
)
@tf.function
def run_fn(data):
return model(data)
result = strategy.run(run_fn, args=(distributed_inputs,))
self.assertIsInstance(
result, tf.types.experimental.distributed.PerReplica
)
self.assertLen(result.values, 2)
self.assertEqual(result.values[0].shape, [8, 2])
self.assertEqual(result.values[1].shape, [8, 2])
self.assertNotAllClose(result.values[0], result.values[1])
self.assertAllClose(result.values[0], tf.zeros([8, 2]))
def test_epoch_iterator(self):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
sample_weight = np.random.random((100,))
batch_size = 16
shuffle = True
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
epoch_iterator = tf_trainer.TFEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
shuffle=shuffle,
distribute_strategy=strategy,
)
steps_seen = []
for step, data_iterator in epoch_iterator.enumerate_epoch():
steps_seen.append(step)
batch = next(data_iterator)
self.assertEqual(len(batch), 3)
x, y, sample_weight = batch
self.assertTrue(
isinstance(x, tf.types.experimental.distributed.PerReplica)
)
# Make sure the local batch size is 8
if step < 6:
self.assertEqual(x.values[0].shape, [8, 16])
self.assertEqual(y.values[0].shape, [8, 4])
self.assertEqual(sample_weight.values[0].shape, [8])
else:
# Last partial batch
self.assertEqual(x.values[0].shape, [2, 16])
self.assertEqual(y.values[0].shape, [2, 4])
self.assertEqual(sample_weight.values[0].shape, [2])
self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
|
from pathlib import Path
def find_and_replace(source: str, replacements: dict[str, str]) -> str:
rtn = source
# replace keys in deterministic alphabetical order
finds = sorted(replacements.keys())
for find in finds:
replace = replacements[find]
rtn = rtn.replace(find, replace)
return rtn
def replace_file(source: Path, replacements: dict[str, str]) -> None:
try:
content = source.read_text()
except UnicodeDecodeError:
# binary file
return
new_content = find_and_replace(content, replacements)
if new_content != content:
source.write_text(new_content)
def replace_glob(parent: Path, glob: str, replacements: dict[str, str]) -> None:
for file in parent.glob(glob):
if not file.is_file():
continue
replace_file(file, replacements)
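# Hedged sketch (illustration only): keys are applied in sorted order, so the
# result is deterministic regardless of dict insertion order. The sample text
# below is made up.
if __name__ == "__main__":
    sample = "hello NAME, welcome to PLACE"
    assert find_and_replace(sample, {"PLACE": "Berlin", "NAME": "Ada"}) == (
        "hello Ada, welcome to Berlin"
    )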
|
from pathlib import Path
from typing import Dict
def find_and_replace(source: str, replacements: Dict[str, str]) -> str:
rtn = source
# replace keys in deterministic alphabetical order
finds = sorted(replacements.keys())
for find in finds:
replace = replacements[find]
rtn = rtn.replace(find, replace)
return rtn
def replace_file(source: Path, replacements: dict[str, str]) -> None:
try:
content = source.read_text()
except UnicodeDecodeError:
# binary file
return
new_content = find_and_replace(content, replacements)
if new_content != content:
source.write_text(new_content)
def replace_glob(parent: Path, glob: str, replacements: dict[str, str]) -> None:
for file in parent.glob(glob):
if not file.is_file():
continue
replace_file(file, replacements)
|
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
from jax.lib import xla_bridge
platform = xla_bridge.get_backend().platform
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, jax.devices("cpu")[0])
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, jax.devices("cpu")[0])
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device_scope(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
if not torch.cuda.device_count():
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device_scope(123).__enter__()
|
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
from jax.lib import xla_bridge
platform = xla_bridge.get_backend().platform
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device(), jax.devices("cpu")[0])
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device(), jax.devices("cpu")[0])
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device(), jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device(), jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device_scope(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
if not torch.cuda.device_count():
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
        # When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device_scope(123).__enter__()
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_utils import TORCH_VERSION
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
dynamic_interval_list (List[Tuple[int, int]], optional): The
first element in the tuple is a milestone and the second
            element is an interval. The interval is used after the
corresponding milestone. Defaults to None.
Returns:
        Tuple[List[int], List[int]]: A list of milestones and a list of their
            corresponding intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
def set_random_seed(seed: Optional[int] = None,
deterministic: bool = False,
diff_rank_seed: bool = False) -> int:
"""Set random seed.
Args:
seed (int, optional): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
        diff_rank_seed (bool): Whether to add rank number to the random seed to
            have different random seeds on different ranks. Default: False.
    Returns:
        int: The seed that is actually used.
    """
if seed is None:
seed = sync_random_seed()
if diff_rank_seed:
rank = get_rank()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
if torch.backends.cudnn.benchmark:
print_log(
'torch.backends.cudnn.benchmark is going to be set as '
'`False` to cause cuDNN to deterministically select an '
'algorithm',
logger='current',
level=logging.WARNING)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if digit_version(TORCH_VERSION) >= digit_version('1.10.0'):
torch.use_deterministic_algorithms(True)
return seed
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
from mmengine.utils import is_list_of
def calc_dynamic_intervals(
start_interval: int,
dynamic_interval_list: Optional[List[Tuple[int, int]]] = None
) -> Tuple[List[int], List[int]]:
"""Calculate dynamic intervals.
Args:
start_interval (int): The interval used in the beginning.
dynamic_interval_list (List[Tuple[int, int]], optional): The
first element in the tuple is a milestone and the second
            element is an interval. The interval is used after the
corresponding milestone. Defaults to None.
Returns:
        Tuple[List[int], List[int]]: A list of milestones and a list of their
            corresponding intervals.
"""
if dynamic_interval_list is None:
return [0], [start_interval]
assert is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dod_metric import DODCocoMetric
from .dump_det_results import DumpDetResults
from .dump_odvg_results import DumpODVGResults
from .dump_proposals_metric import DumpProposals
from .flickr30k_metric import Flickr30kMetric
from .grefcoco_metric import gRefCOCOMetric
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .ov_coco_metric import OVCocoMetric
from .refexp_metric import RefExpMetric
from .refseg_metric import RefSegMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric', 'RefSegMetric', 'RefExpMetric',
'gRefCOCOMetric', 'DODCocoMetric', 'DumpODVGResults', 'Flickr30kMetric',
'OVCocoMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .refseg_metric import RefSegMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric', 'RefSegMetric'
]
|
__version__ = '0.13.16'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.15'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
"""
NOTE: This file must be imported like
``import torch.distributed.fsdp._traversal_utils`` and not like
``from torch.distributed.fsdp._traversal_utils import ...`` to avoid circular
imports. For brevity, we may import the file as ``traversal_utils``.
"""
import collections
import torch.nn as nn
from torch.distributed._composable.contract import _get_registry
from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state
"""
[Note: FSDP State Traversal]
For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel``
module wrapping a fully sharded module, and for the non-wrapper code path,
``_FSDPState`` is an object that gets embedded on a fully sharded module.
See [Note: Fully Sharded Module] for the definition.
There are three common traversal idioms: Given a root module,
- ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree.
- ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the
tree (i.e. those with ``_is_root == True``).
- ``_get_fsdp_handles()`` returns all ``FlatParamHandle`` s in the tree.
All of these methods must take in the root module (i.e. an ``nn.Module``) and
not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph
traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal.
"""
def _composable(module: nn.Module) -> bool:
"""
Returns if ``module`` can compose with ``fully_shard``.
"""
# TODO: Add any other composable APIs that are mutually exclusive.
registry = _get_registry(module)
if registry is None:
return True
return "replicate" not in registry
# TODO (awgu): We may be able to remove this function if we retired the
# `use_orig_params=False` code path since so far we only need the module for
# `FlatParameter` registration, which is not needed for `use_orig_params=True`.
def _get_fsdp_states_with_modules(
module: nn.Module,
) -> tuple[list[_FSDPState], list[nn.Module]]:
"""
Returns a tuple containing:
1. A list of the ``_FSDPState`` instances in the module tree rooted at
``module`` without any duplicates and following the ``module.modules()``
traversal order (which is assumed to be depth-first).
2. A corresponding list of the modules owning the states in the first list.
For the wrapper code path, both returned lists are the same, each
containing all ``FullyShardedDataParallel`` instances. For the composable
code path, this returns a list of all composable state instances and a list
of the corresponding fully sharded modules. See [Note: Fully Sharded
Module].
NOTE: The traversal does not proceed into any module annotated by an
incompatible API (e.g. ``replicate``).
"""
fsdp_states: list[_FSDPState] = []
fsdp_modules: list[nn.Module] = []
# Track the visited FSDP states since multiple modules may share the same
# one and we want to return a de-duplicated list
visited_fsdp_states: set[_FSDPState] = set()
# Track the visited modules in case of shared modules, which implies the
# module graph is no longer a tree
visited_modules: set[nn.Module] = set()
# Perform depth-first search from `module` to ensure that we do not
# traverse into an incompatible API's subtree (use DFS instead of BFS to
# match `.modules()` order)
deque: collections.deque[nn.Module] = collections.deque([module])
while deque:
submodule = deque.popleft()
visited_modules.add(submodule)
if not _composable(submodule):
continue
for child_module in reversed(list(submodule.children())):
if child_module not in visited_modules:
deque.appendleft(child_module)
optional_state = _get_module_fsdp_state(submodule)
if optional_state is not None and optional_state not in visited_fsdp_states:
visited_fsdp_states.add(optional_state)
fsdp_states.append(optional_state)
fsdp_modules.append(submodule)
return fsdp_states, fsdp_modules
def _get_fsdp_states(module: nn.Module) -> list[_FSDPState]:
"""See :func:`_get_fsdp_states_with_modules`."""
fsdp_states, _ = _get_fsdp_states_with_modules(module)
return fsdp_states
def _get_fsdp_handles(module: nn.Module) -> list:
"""
Returns all ``FlatParamHandle`` s in the module tree rooted at ``module``
following the rules in :func:`_get_fsdp_state`.
"""
handles = [
fsdp_state._handle
for fsdp_state in _get_fsdp_states(module)
if fsdp_state._handle is not None
]
return handles
|
"""
NOTE: This file must be imported like
``import torch.distributed.fsdp._traversal_utils`` and not like
``from torch.distributed.fsdp._traversal_utils import ...`` to avoid circular
imports. For brevity, we may import the file as ``traversal_utils``.
"""
import collections
import torch.nn as nn
from torch.distributed._composable.contract import _get_registry
from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state
"""
[Note: FSDP State Traversal]
For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel``
module wrapping a fully sharded module, and for the non-wrapper code path,
``_FSDPState`` is an object that gets embedded on a fully sharded module.
See [Note: Fully Sharded Module] for the definition.
There are three common traversal idioms: Given a root module,
- ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree.
- ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the
tree (i.e. those with ``_is_root == True``).
- ``_get_fsdp_handles()`` returns all ``FlatParamHandle`` s in the tree.
All of these methods must take in the root module (i.e. an ``nn.Module``) and
not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph
traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal.
"""
def _composable(module: nn.Module) -> bool:
"""
Returns if ``module`` can compose with ``fully_shard``.
"""
# TODO: Add any other composable APIs that are mutually exclusive.
registry = _get_registry(module)
if registry is None:
return True
return "replicate" not in registry
# TODO (awgu): We may be able to remove this function if we retired the
# `use_orig_params=False` code path since so far we only need the module for
# `FlatParameter` registration, which is not needed for `use_orig_params=True`.
def _get_fsdp_states_with_modules(
module: nn.Module,
) -> tuple[list[_FSDPState], list[nn.Module]]:
"""
Returns a tuple containing:
1. A list of the ``_FSDPState`` instances in the module tree rooted at
``module`` without any duplicates and following the ``module.modules()``
traversal order (which is assumed to be depth-first).
2. A corresponding list of the modules owning the states in the first list.
For the wrapper code path, both returned lists are the same, each
containing all ``FullyShardedDataParallel`` instances. For the composable
code path, this returns a list of all composable state instances and a list
of the corresponding fully sharded modules. See [Note: Fully Sharded
Module].
NOTE: The traversal does not proceed into any module annotated by an
incompatible API (e.g. ``replicate``).
"""
fsdp_states: list[_FSDPState] = []
fsdp_modules: list[nn.Module] = []
# Track the visited FSDP states since multiple modules may share the same
# one and we want to return a de-duplicated list
visited_fsdp_states: set[_FSDPState] = set()
# Track the visited modules in case of shared modules, which implies the
# module graph is no longer a tree
visited_modules: set[nn.Module] = set()
# Perform depth-first search from `module` to ensure that we do not
# traverse into an incompatible API's subtree (use DFS instead of BFS to
# match `.modules()` order)
deque: collections.deque[nn.Module] = collections.deque([module])
while deque:
submodule = deque.popleft()
visited_modules.add(submodule)
if not _composable(submodule):
continue
for child_module in reversed(list(submodule.children())):
if child_module not in visited_modules:
deque.appendleft(child_module)
optional_state = _get_module_fsdp_state(submodule)
if optional_state is not None and optional_state not in visited_fsdp_states:
visited_fsdp_states.add(optional_state)
fsdp_states.append(optional_state)
fsdp_modules.append(submodule)
return fsdp_states, fsdp_modules
def _get_fsdp_states(module: nn.Module) -> list[_FSDPState]:
"""See :func:`_get_fsdp_states_with_modules`."""
fsdp_states, _ = _get_fsdp_states_with_modules(module)
return fsdp_states
def _get_fsdp_handles(module: nn.Module) -> list:
"""
Returns all ``FlatParamHandle`` s in the module tree rooted at ``module``
following the rules in :func:`_get_fsdp_state`.
"""
handles = [
fsdp_state._handle
for fsdp_state in _get_fsdp_states(module)
if fsdp_state._handle is not None
]
return handles
|
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
):
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs
        (positives that are far apart) and hard negative pairs (negatives that are close) and computes the loss
        only for these pairs. This loss often yields better performance than :class:`ContrastiveLoss`.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
        super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
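        # Hard negatives are negative pairs that are closer than the farthest
        # positive pair; hard positives are positive pairs that are farther
        # apart than the closest negative pair. Only these "hard" pairs
        # contribute to the loss computed below.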
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from .ContrastiveLoss import SiameseDistanceMetric
from sentence_transformers.SentenceTransformer import SentenceTransformer
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
):
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs
        (positives that are far apart) and hard negative pairs (negatives that are close) and computes the loss
        only for these pairs. This loss often yields better performance than :class:`ContrastiveLoss`.
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses, InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('all-MiniLM-L6-v2')
train_examples = [
InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.OnlineContrastiveLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Union
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseMetric(metaclass=ABCMeta):
"""Base class for a metric.
The metric first processes each batch of data_samples and predictions,
and appends the processed results to the results list. Then it
collects all results together from all ranks if distributed training
is used. Finally, it computes the metrics of the entire dataset.
    A subclass of :class:`BaseMetric` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in metric class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[dict],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseMetric(metaclass=ABCMeta):
"""Base class for a metric.
The metric first processes each batch of data_samples and predictions,
and appends the processed results to the results list. Then it
collects all results together from all ranks if distributed training
is used. Finally, it computes the metrics of the entire dataset.
    A subclass of :class:`BaseMetric` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in metric class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[Tuple[Any, dict]],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class SqlConfig(datasets.BuilderConfig):
"""BuilderConfig for SQL."""
sql: Union[str, "sqlalchemy.sql.Selectable"] = None
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
index_col: Optional[Union[str, list[str]]] = None
coerce_float: bool = True
params: Optional[Union[list, tuple, dict]] = None
parse_dates: Optional[Union[list, dict]] = None
columns: Optional[list[str]] = None
chunksize: Optional[int] = 10_000
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
if self.sql is None:
raise ValueError("sql must be specified")
if self.con is None:
raise ValueError("con must be specified")
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[datasets.Features] = None,
) -> str:
config_kwargs = config_kwargs.copy()
# We need to stringify the Selectable object to make its hash deterministic
# The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
sql = config_kwargs["sql"]
if not isinstance(sql, str):
if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
import sqlalchemy
if isinstance(sql, sqlalchemy.sql.Selectable):
engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
sql_str = str(sql.compile(dialect=engine.dialect))
config_kwargs["sql"] = sql_str
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
con = config_kwargs["con"]
if not isinstance(con, str):
config_kwargs["con"] = id(con)
logger.info(
f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
)
return super().create_config_id(config_kwargs, custom_features=custom_features)
@property
def pd_read_sql_kwargs(self):
pd_read_sql_kwargs = {
"index_col": self.index_col,
"columns": self.columns,
"params": self.params,
"coerce_float": self.coerce_float,
"parse_dates": self.parse_dates,
}
return pd_read_sql_kwargs
class Sql(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = SqlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self):
chunksize = self.config.chunksize
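        # With a non-None chunksize, pd.read_sql returns an iterator of
        # DataFrames; each chunk is converted to a pyarrow Table and yielded
        # together with its chunk index.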
sql_reader = pd.read_sql(
self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
)
sql_reader = [sql_reader] if chunksize is None else sql_reader
for chunk_idx, df in enumerate(sql_reader):
pa_table = pa.Table.from_pandas(df)
yield chunk_idx, self._cast_table(pa_table)
|
import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class SqlConfig(datasets.BuilderConfig):
"""BuilderConfig for SQL."""
sql: Union[str, "sqlalchemy.sql.Selectable"] = None
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
index_col: Optional[Union[str, List[str]]] = None
coerce_float: bool = True
params: Optional[Union[List, Tuple, Dict]] = None
parse_dates: Optional[Union[List, Dict]] = None
columns: Optional[List[str]] = None
chunksize: Optional[int] = 10_000
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
if self.sql is None:
raise ValueError("sql must be specified")
if self.con is None:
raise ValueError("con must be specified")
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[datasets.Features] = None,
) -> str:
config_kwargs = config_kwargs.copy()
# We need to stringify the Selectable object to make its hash deterministic
# The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
sql = config_kwargs["sql"]
if not isinstance(sql, str):
if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
import sqlalchemy
if isinstance(sql, sqlalchemy.sql.Selectable):
engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
sql_str = str(sql.compile(dialect=engine.dialect))
config_kwargs["sql"] = sql_str
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
con = config_kwargs["con"]
if not isinstance(con, str):
config_kwargs["con"] = id(con)
logger.info(
f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
)
return super().create_config_id(config_kwargs, custom_features=custom_features)
@property
def pd_read_sql_kwargs(self):
pd_read_sql_kwargs = {
"index_col": self.index_col,
"columns": self.columns,
"params": self.params,
"coerce_float": self.coerce_float,
"parse_dates": self.parse_dates,
}
return pd_read_sql_kwargs
class Sql(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = SqlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self):
chunksize = self.config.chunksize
sql_reader = pd.read_sql(
self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
)
sql_reader = [sql_reader] if chunksize is None else sql_reader
for chunk_idx, df in enumerate(sql_reader):
pa_table = pa.Table.from_pandas(df)
yield chunk_idx, self._cast_table(pa_table)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseSQLDatabaseTool,
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QuerySQLCheckerTool,
QuerySQLDataBaseTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseSQLDatabaseTool": "langchain_community.tools",
"QuerySQLDataBaseTool": "langchain_community.tools",
"InfoSQLDatabaseTool": "langchain_community.tools",
"ListSQLDatabaseTool": "langchain_community.tools",
"QuerySQLCheckerTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
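# For example, accessing ``QuerySQLDataBaseTool`` on this module is resolved
# lazily through the importer above, which pulls the class from
# ``langchain_community.tools`` and raises a deprecation warning pointing at
# the new location.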
__all__ = [
"BaseSQLDatabaseTool",
"InfoSQLDatabaseTool",
"ListSQLDatabaseTool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseSQLDatabaseTool,
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QuerySQLCheckerTool,
QuerySQLDataBaseTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseSQLDatabaseTool": "langchain_community.tools",
"QuerySQLDataBaseTool": "langchain_community.tools",
"InfoSQLDatabaseTool": "langchain_community.tools",
"ListSQLDatabaseTool": "langchain_community.tools",
"QuerySQLCheckerTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseSQLDatabaseTool",
"QuerySQLDataBaseTool",
"InfoSQLDatabaseTool",
"ListSQLDatabaseTool",
"QuerySQLCheckerTool",
]
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES and params.get('device_plugins'):
yaml = _get_yaml(template, params)
yaml = _get_deployment_with_device_plugins(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = value
return config_map
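# Example (hypothetical values): device_plugins={'nvidia.com/gpu': 1} becomes
# {'limits': {'nvidia.com/gpu': 1}} and is written to the first container's
# `resources` field of the deployment template below.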
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'deployment',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES and params.get('device_plugins'):
yaml = _get_yaml(template, params)
yaml = _get_deployment_with_device_plugins(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = value
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
|
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.encode_query.side_effect = mock_encode
model.encode_document.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
from functools import partial
from huggingface_hub import hf_hub_url
hf_dataset_url = partial(hf_hub_url, repo_type="dataset")
|
import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from huggingface_hub.hf_api import RepoFile
from packaging import version
from requests import ConnectionError, HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Server Error)" and "503 (Service Unavailable)" HTTP errors
if config.HF_HUB_VERSION.release < version.parse("0.20.0").release:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
max_retries = 5
base_wait_time = 1
max_wait_time = 8
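        # With these constants the backoff schedule is 1s, 2s, 4s, 8s, 8s
        # (min(max_wait_time, base_wait_time * 2**retry)) before giving up
        # after max_retries failed attempts.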
retry = 0
while True:
try:
hf_api.preupload_lfs_files(**kwargs)
except (RuntimeError, HTTPError, ConnectionError) as err:
if isinstance(err, RuntimeError):
if isinstance(err.__cause__, (HTTPError, ConnectionError)):
err = err.__cause__
else:
raise err
if retry >= max_retries or err.response and err.response.status_code not in [500, 503]:
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff
logger.info(
f"{hf_api.preupload_lfs_files} timed out, retrying in {sleep_time}s... [{retry/max_retries}]"
)
time.sleep(sleep_time)
retry += 1
else:
break
else:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
hf_api.preupload_lfs_files(**kwargs)
# `list_files_info` is deprecated in favor of `list_repo_tree` in `huggingface_hub>=0.20.0`
if config.HF_HUB_VERSION.release < version.parse("0.20.0").release:
def list_files_info(hf_api: HfApi, **kwargs):
yield from hf_api.list_files_info(**kwargs)
else:
def list_files_info(hf_api: HfApi, **kwargs):
kwargs = {**kwargs, "recursive": True}
for repo_path in hf_api.list_repo_tree(**kwargs):
if isinstance(repo_path, RepoFile):
yield repo_path
# backward compatibility
hf_hub_url = partial(hf_hub_url, repo_type="dataset")
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[BETA] Convert a PIL Image or ndarray to tensor and scale the values accordingly.
.. v2betastatus:: ToTensor transform
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[BETA] Convert a PIL Image or ndarray to tensor and scale the values accordingly.
.. betastatus:: ToTensor transform
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
from pathlib import Path
import pytest
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
|
import torch
from torchvision import tv_tensors
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details."""
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, tv_tensors.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
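# Usage note (illustrative, not part of the upstream module): torchvision video
# tensors follow the (..., T, C, H, W) layout, so the temporal axis is dim -4 as
# used above. For example, `uniform_temporal_subsample(torch.rand(16, 3, 224, 224),
# num_samples=4)` keeps 4 evenly spaced frames and returns a tensor of shape
# (4, 3, 224, 224).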
|
import torch
from torchvision import tv_tensors
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor:
"""[BETA] See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details."""
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, tv_tensors.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import time
from contextlib import contextmanager
from typing import Generator, Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
    Consider the case of resetting the current ``Registry`` by
    ``default_scope`` in an internal module that cannot access the runner
    directly: it is difficult to get the ``default_scope`` defined in
    ``Runner``. However, if ``Runner`` creates a ``DefaultScope`` instance
    with the given ``default_scope``, the internal module can get
    ``default_scope`` by ``DefaultScope.get_current_instance`` everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine.model import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
assert isinstance(
scope_name,
str), (f'scope_name should be a string, but got {scope_name}')
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
        Since ``default_scope`` is an optional argument for ``Registry.build``,
``get_current_instance`` should return ``None`` if there is no
``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
            Optional[DefaultScope]: Returns ``None`` if no ``DefaultScope``
                instance has been created yet; otherwise returns the latest
                created ``DefaultScope`` instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super().get_current_instance()
else:
instance = None
_release_lock()
return instance
@classmethod
@contextmanager
def overwrite_default_scope(cls, scope_name: Optional[str]) -> Generator:
"""overwrite the current default scope with `scope_name`"""
if scope_name is None:
yield
else:
tmp = copy.deepcopy(cls._instance_dict)
            # Sleep briefly to avoid creating an instance with the same name.
time.sleep(1e-6)
cls.get_instance(f'overwrite-{time.time()}', scope_name=scope_name)
try:
yield
finally:
cls._instance_dict = tmp
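if __name__ == '__main__':
    # Illustrative sketch (not part of the upstream module): temporarily
    # switching the global default scope with the context manager above.
    # The scope names are examples only.
    DefaultScope.get_instance('task', scope_name='mmengine')
    with DefaultScope.overwrite_default_scope('mmdet'):
        # inside the context the latest created instance carries the new scope
        assert DefaultScope.get_current_instance().scope_name == 'mmdet'
    # leaving the context restores the previously registered instances
    assert DefaultScope.get_current_instance().scope_name == 'mmengine'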
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import time
from contextlib import contextmanager
from typing import Generator, Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
    Consider the case of resetting the current ``Registry`` by
    ``default_scope`` in an internal module that cannot access the runner
    directly: it is difficult to get the ``default_scope`` defined in
    ``Runner``. However, if ``Runner`` creates a ``DefaultScope`` instance
    with the given ``default_scope``, the internal module can get
    ``default_scope`` by ``DefaultScope.get_current_instance`` everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine.model import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
assert isinstance(
scope_name,
str), (f'scope_name should be a string, but got {scope_name}')
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
        Since ``default_scope`` is an optional argument for ``Registry.build``,
``get_current_instance`` should return ``None`` if there is no
``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
            Optional[DefaultScope]: Returns ``None`` if no ``DefaultScope``
                instance has been created yet; otherwise returns the latest
                created ``DefaultScope`` instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super().get_current_instance()
else:
instance = None
_release_lock()
return instance
@classmethod
@contextmanager
def overwrite_default_scope(cls, scope_name: Optional[str]) -> Generator:
"""overwrite the current default scope with `scope_name`"""
if scope_name is None:
yield
else:
tmp = copy.deepcopy(cls._instance_dict)
cls.get_instance(f'overwrite-{time.time()}', scope_name=scope_name)
try:
yield
finally:
cls._instance_dict = tmp
|
_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
"""PDF Table reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFTableReader(BaseReader):
"""
PDF Table Reader. Reads table from PDF.
Args:
row_separator (str): Row separator used to join rows of a DataFrame.
col_separator (str): Col separator used to join columns of a DataFrame.
"""
def __init__(
self,
*args: Any,
row_separator: str = "\n",
col_separator: str = ", ",
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self._row_separator = row_separator
self._col_separator = col_separator
def load_data(
self, file: Path, pages: str = "1", extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Load data and extract table from PDF file.
Args:
file (Path): Path for the PDF file.
pages (str): Pages to read tables from.
extra_info (Optional[Dict]): Extra information.
Returns:
List[Document]: List of documents.
"""
import camelot
results = []
tables = camelot.read_pdf(filepath=str(file), pages=pages)
for table in tables:
document = self._dataframe_to_document(df=table.df, extra_info=extra_info)
results.append(document)
return results
def _dataframe_to_document(
self, df: pd.DataFrame, extra_info: Optional[Dict] = None
) -> Document:
df_list = df.apply(
lambda row: (self._col_separator).join(row.astype(str).tolist()), axis=1
).tolist()
return Document(
text=self._row_separator.join(df_list), extra_info=extra_info or {}
)
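if __name__ == "__main__":
    # Hedged usage sketch: requires the optional `camelot` dependency and a
    # PDF that actually contains detectable tables; the file name below is a
    # hypothetical placeholder.
    reader = PDFTableReader(row_separator="\n", col_separator=", ")
    documents = reader.load_data(file=Path("example_tables.pdf"), pages="1-2")
    for document in documents:
        print(document.text[:200])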
|
"""PDF Table reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFTableReader(BaseReader):
"""
PDF Table Reader. Reads table from PDF.
Args:
row_separator (str): Row separator used to join rows of a DataFrame.
col_separator (str): Col separator used to join columns of a DataFrame.
"""
def __init__(
self,
*args: Any,
row_separator: str = "\n",
col_separator: str = ", ",
**kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self._row_separator = row_separator
self._col_separator = col_separator
def load_data(
self, file: Path, pages: str = "1", extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Load data and extract table from PDF file.
Args:
file (Path): Path for the PDF file.
pages (str): Pages to read tables from.
extra_info (Optional[Dict]): Extra information.
Returns:
List[Document]: List of documents.
"""
import camelot
results = []
tables = camelot.read_pdf(filepath=str(file), pages=pages)
for table in tables:
document = self._dataframe_to_document(df=table.df, extra_info=extra_info)
results.append(document)
return results
def _dataframe_to_document(
self, df: pd.DataFrame, extra_info: Optional[Dict] = None
) -> Document:
df_list = df.apply(
lambda row: (self._col_separator).join(row.astype(str).tolist()), axis=1
).tolist()
return Document(
text=self._row_separator.join(df_list), extra_info=extra_info or {}
)
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .RegularizerLoss import FlopsLoss, L0FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"L0FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .RegularizerLoss import FlopsLoss, IDFFlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"IDFFlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
"""
This script translates the queries in the MS MARCO dataset to the defined target languages.
For machine translation, we use EasyNMT: https://github.com/UKPLab/EasyNMT
You can install it via: pip install easynmt
Usage:
python translate_queries [target_language]
"""
import os
from sentence_transformers import LoggingHandler, util
import logging
import tarfile
from easynmt import EasyNMT
import sys
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
target_lang = sys.argv[1]
output_folder = "multilingual-data"
data_folder = "../msmarco-data"
output_filename = os.path.join(output_folder, "train_queries.en-{}.tsv".format(target_lang))
os.makedirs(output_folder, exist_ok=True)
## Does the output file exist? If yes, read it so we can continue the translation
translated_qids = set()
if os.path.exists(output_filename):
with open(output_filename, "r", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
translated_qids.add(splits[0])
### Now we read the MS Marco dataset
os.makedirs(data_folder, exist_ok=True)
# Read qrels file for relevant positives per query
train_queries = {}
qrels_train = os.path.join(data_folder, "qrels.train.tsv")
if not os.path.exists(qrels_train):
util.http_get("https://msmarco.blob.core.windows.net/msmarcoranking/qrels.train.tsv", qrels_train)
with open(qrels_train) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split()
if qid not in translated_qids:
train_queries[qid] = None
# Read all queries
queries_filepath = os.path.join(data_folder, "queries.train.tsv")
if not os.path.exists(queries_filepath):
tar_filepath = os.path.join(data_folder, "queries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download queries.tar.gz")
util.http_get("https://msmarco.blob.core.windows.net/msmarcoranking/queries.tar.gz", tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(queries_filepath, "r", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
if qid in train_queries:
train_queries[qid] = query.strip()
qids = [qid for qid in train_queries if train_queries[qid] is not None]
queries = [train_queries[qid] for qid in qids]
# Define our translation model
translation_model = EasyNMT("opus-mt")
print("Start translation of {} queries.".format(len(queries)))
print("This can take a while. But you can stop this script at any point")
with open(output_filename, "a" if os.path.exists(output_filename) else "w", encoding="utf8") as fOut:
for qid, query, translated_query in zip(
qids,
queries,
translation_model.translate_stream(
queries,
source_lang="en",
target_lang=target_lang,
beam_size=2,
perform_sentence_splitting=False,
chunk_size=256,
batch_size=64,
),
):
fOut.write("{}\t{}\n".format(qid, translated_query.replace("\t", " ")))
fOut.flush()
|
"""
This script translates the queries in the MS MARCO dataset to the defined target languages.
For machine translation, we use EasyNMT: https://github.com/UKPLab/EasyNMT
You can install it via: pip install easynmt
Usage:
python translate_queries [target_language]
"""
import os
from sentence_transformers import LoggingHandler, util
import logging
import tarfile
from easynmt import EasyNMT
import sys
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
target_lang = sys.argv[1]
output_folder = "multilingual-data"
data_folder = "../msmarco-data"
output_filename = os.path.join(output_folder, "train_queries.en-{}.tsv".format(target_lang))
os.makedirs(output_folder, exist_ok=True)
## Does the output file exist? If yes, read it so we can continue the translation
translated_qids = set()
if os.path.exists(output_filename):
with open(output_filename, "r", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
translated_qids.add(splits[0])
### Now we read the MS Marco dataset
os.makedirs(data_folder, exist_ok=True)
# Read qrels file for relevant positives per query
train_queries = {}
qrels_train = os.path.join(data_folder, "qrels.train.tsv")
if not os.path.exists(qrels_train):
util.http_get("https://msmarco.blob.core.windows.net/msmarcoranking/qrels.train.tsv", qrels_train)
with open(qrels_train) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split()
if qid not in translated_qids:
train_queries[qid] = None
# Read all queries
queries_filepath = os.path.join(data_folder, "queries.train.tsv")
if not os.path.exists(queries_filepath):
tar_filepath = os.path.join(data_folder, "queries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download queries.tar.gz")
util.http_get("https://msmarco.blob.core.windows.net/msmarcoranking/queries.tar.gz", tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(queries_filepath, "r", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
if qid in train_queries:
train_queries[qid] = query.strip()
qids = [qid for qid in train_queries if train_queries[qid] is not None]
queries = [train_queries[qid] for qid in qids]
# Define our translation model
translation_model = EasyNMT("opus-mt")
print("Start translation of {} queries.".format(len(queries)))
print("This can take a while. But you can stop this script at any point")
with open(output_filename, "a" if os.path.exists(output_filename) else "w", encoding="utf8") as fOut:
for qid, query, translated_query in zip(
qids,
queries,
translation_model.translate_stream(
queries,
source_lang="en",
target_lang=target_lang,
beam_size=2,
perform_sentence_splitting=False,
chunk_size=256,
batch_size=64,
),
):
fOut.write("{}\t{}\n".format(qid, translated_query.replace("\t", " ")))
fOut.flush()
|
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
handle_event,
trace_as_chain_group,
)
from langchain_core.tracers.context import (
collect_runs,
register_configure_hook,
tracing_enabled,
tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
__all__ = [
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"AsyncCallbackManagerForChainRun",
"AsyncCallbackManagerForLLMRun",
"AsyncCallbackManagerForRetrieverRun",
"AsyncCallbackManagerForToolRun",
"AsyncParentRunManager",
"AsyncRunManager",
"BaseRunManager",
"CallbackManager",
"CallbackManagerForChainGroup",
"CallbackManagerForChainRun",
"CallbackManagerForLLMRun",
"CallbackManagerForRetrieverRun",
"CallbackManagerForToolRun",
"ParentRunManager",
"RunManager",
"collect_runs",
"env_var_is_set",
"handle_event",
"register_configure_hook",
"trace_as_chain_group",
"tracing_enabled",
"tracing_v2_enabled",
]
|
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
handle_event,
trace_as_chain_group,
)
from langchain_core.tracers.context import (
collect_runs,
register_configure_hook,
tracing_enabled,
tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
__all__ = [
"tracing_enabled",
"tracing_v2_enabled",
"collect_runs",
"trace_as_chain_group",
"handle_event",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"register_configure_hook",
"env_var_is_set",
]
|
import os
from jina import Flow, Document, DocumentArray
from ...tfidf_text_executor import TFIDFTextEncoder # is implicitly required
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_generates_embedding():
doc = DocumentArray([Document(text='Han likes eating pizza')])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.index(inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
# input has 4 different words
assert responses[0].docs[0].embedding.nnz == 4
|
import os
from jina import Flow, Document, DocumentArray
from jinahub.encoder.tfidf_text_executor import TFIDFTextEncoder # is implicitly required
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_generates_embedding():
doc = DocumentArray([Document(text='Han likes eating pizza')])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.index(inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
# input has 4 different words
assert responses[0].docs[0].embedding.nnz == 4
|
from inspect import signature
from typing import (
Any,
Awaitable,
Callable,
List,
Optional,
Tuple,
Type,
Union,
cast,
get_origin,
get_args,
)
import typing
from llama_index.core.bridge.pydantic import BaseModel, FieldInfo, create_model
def create_schema_from_function(
name: str,
func: Union[Callable[..., Any], Callable[..., Awaitable[Any]]],
additional_fields: Optional[
List[Union[Tuple[str, Type, Any], Tuple[str, Type]]]
] = None,
ignore_fields: Optional[List[str]] = None,
) -> Type[BaseModel]:
"""Create schema from function."""
fields = {}
ignore_fields = ignore_fields or []
params = signature(func).parameters
for param_name in params:
if param_name in ignore_fields:
continue
param_type = params[param_name].annotation
param_default = params[param_name].default
description = None
if get_origin(param_type) is typing.Annotated:
args = get_args(param_type)
param_type = args[0]
if isinstance(args[1], str):
description = args[1]
if param_type is params[param_name].empty:
param_type = Any
if param_default is params[param_name].empty:
# Required field
fields[param_name] = (param_type, FieldInfo(description=description))
elif isinstance(param_default, FieldInfo):
# Field with pydantic.Field as default value
fields[param_name] = (param_type, param_default)
else:
fields[param_name] = (
param_type,
FieldInfo(default=param_default, description=description),
)
additional_fields = additional_fields or []
for field_info in additional_fields:
if len(field_info) == 3:
field_info = cast(Tuple[str, Type, Any], field_info)
field_name, field_type, field_default = field_info
fields[field_name] = (field_type, FieldInfo(default=field_default))
elif len(field_info) == 2:
# Required field has no default value
field_info = cast(Tuple[str, Type], field_info)
field_name, field_type = field_info
fields[field_name] = (field_type, FieldInfo())
else:
raise ValueError(
f"Invalid additional field info: {field_info}. "
"Must be a tuple of length 2 or 3."
)
return create_model(name, **fields) # type: ignore
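if __name__ == "__main__":
    # Illustrative sketch with a toy function (not part of the library),
    # showing how parameters map onto the generated pydantic schema.
    def search(query: str, top_k: int = 5, verbose: bool = False) -> list:
        """Toy search function used only for this example."""
        return []

    SearchSchema = create_schema_from_function(
        "SearchSchema", search, ignore_fields=["verbose"]
    )
    # `verbose` is skipped; `query` is required and `top_k` defaults to 5.
    # `.schema()` may emit a deprecation warning under pydantic v2.
    print(sorted(SearchSchema.schema()["properties"]))  # ['query', 'top_k']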
|
from inspect import signature
from typing import (
Any,
Awaitable,
Callable,
List,
Optional,
Tuple,
Type,
Union,
cast,
get_origin,
get_args,
)
import typing
from llama_index.core.bridge.pydantic import BaseModel, FieldInfo, create_model
def create_schema_from_function(
name: str,
func: Union[Callable[..., Any], Callable[..., Awaitable[Any]]],
additional_fields: Optional[
List[Union[Tuple[str, Type, Any], Tuple[str, Type]]]
] = None,
) -> Type[BaseModel]:
"""Create schema from function."""
fields = {}
params = signature(func).parameters
for param_name in params:
param_type = params[param_name].annotation
param_default = params[param_name].default
description = None
if get_origin(param_type) is typing.Annotated:
args = get_args(param_type)
param_type = args[0]
if isinstance(args[1], str):
description = args[1]
if param_type is params[param_name].empty:
param_type = Any
if param_default is params[param_name].empty:
# Required field
fields[param_name] = (param_type, FieldInfo(description=description))
elif isinstance(param_default, FieldInfo):
# Field with pydantic.Field as default value
fields[param_name] = (param_type, param_default)
else:
fields[param_name] = (
param_type,
FieldInfo(default=param_default, description=description),
)
additional_fields = additional_fields or []
for field_info in additional_fields:
if len(field_info) == 3:
field_info = cast(Tuple[str, Type, Any], field_info)
field_name, field_type, field_default = field_info
fields[field_name] = (field_type, FieldInfo(default=field_default))
elif len(field_info) == 2:
# Required field has no default value
field_info = cast(Tuple[str, Type], field_info)
field_name, field_type = field_info
fields[field_name] = (field_type, FieldInfo())
else:
raise ValueError(
f"Invalid additional field info: {field_info}. "
"Must be a tuple of length 2 or 3."
)
return create_model(name, **fields) # type: ignore
|
# Copyright 2021 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import jax.numpy as jnp
class JaxFormatter(Formatter[Mapping, "jnp.ndarray", Mapping]):
def __init__(self, features=None, **jnp_array_kwargs):
super().__init__(features=features)
self.jnp_array_kwargs = jnp_array_kwargs
import jax.numpy as jnp # noqa import jax at initialization
def _consolidate(self, column):
import jax.numpy as jnp
if isinstance(column, list) and column:
if all(
isinstance(x, jnp.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return jnp.stack(column)
return column
def _tensorize(self, value):
import jax
import jax.numpy as jnp
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "jnp.ndarray":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
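# Usage note (illustrative): this formatter is normally selected through the
# public `datasets` API, e.g. `dataset.with_format("jax")` or
# `dataset.set_format("jax")`, after which indexing the dataset returns
# `jnp.ndarray` values instead of plain Python objects.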
|
# Copyright 2021 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import jax.numpy as jnp
class JaxFormatter(Formatter[dict, "jnp.ndarray", dict]):
def __init__(self, features=None, decoded=True, **jnp_array_kwargs):
super().__init__(features=features, decoded=decoded)
self.jnp_array_kwargs = jnp_array_kwargs
import jax.numpy as jnp # noqa import jax at initialization
def _consolidate(self, column):
import jax.numpy as jnp
if isinstance(column, list) and column:
if all(
isinstance(x, jnp.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return jnp.stack(column)
return column
def _tensorize(self, value):
import jax
import jax.numpy as jnp
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> dict:
row = self.numpy_arrow_extractor().extract_row(pa_table)
if self.decoded:
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "jnp.ndarray":
column = self.numpy_arrow_extractor().extract_column(pa_table)
if self.decoded:
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
if self.decoded:
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
from .torch_encoder import ImageTorchEncoder
|
from .torch_encoder import ImageTorchEncoder
|
import json
import os
from typing import Dict
from torch import Tensor, nn
class Dropout(nn.Module):
"""Dropout layer.
Args:
        dropout: Dropout probability applied to the sentence embedding.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
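if __name__ == "__main__":
    # Minimal sketch: applying the module to a fake feature dict. In a real
    # SentenceTransformer pipeline this layer would sit after a pooling module.
    import torch

    layer = Dropout(dropout=0.5)
    features = {"sentence_embedding": torch.ones(2, 8)}
    out = layer(features)
    print(out["sentence_embedding"].shape)  # torch.Size([2, 8])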
|
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class Dropout(nn.Module):
"""Dropout layer.
Args:
        dropout: Dropout probability applied to the sentence embedding.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
|
import json
import pytest
from langchain_core.agents import AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.openai_functions_multi_agent.base import (
_FunctionsAgentAction,
_parse_ai_message,
)
# Test: _parse_ai_message() function.
class TestParseAIMessage:
# Test: Pass Non-AIMessage.
def test_not_an_ai(self) -> None:
err = f"Expected an AI message got {SystemMessage!s}"
with pytest.raises(TypeError, match=err):
_parse_ai_message(SystemMessage(content="x"))
# Test: Model response (not a function call).
def test_model_response(self) -> None:
msg = AIMessage(content="Model response.")
result = _parse_ai_message(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {"output": "Model response."}
assert result.log == "Model response."
# Test: Model response with a function call.
def test_func_call(self) -> None:
act = json.dumps([{"action_name": "foo", "action": {"param": 42}}])
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": f'{{"actions": {act}}}'}
},
)
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == "foo"
assert action.tool_input == {"param": 42}
assert action.log == (
"\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
)
assert action.message_log == [msg]
# Test: Model response with a function call (old style tools).
def test_func_call_oldstyle(self) -> None:
act = json.dumps([{"action_name": "foo", "action": {"__arg1": "42"}}])
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": f'{{"actions": {act}}}'}
},
)
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == "foo"
assert action.tool_input == "42"
assert action.log == (
"\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n"
)
assert action.message_log == [msg]
# Test: Invalid function call args.
def test_func_call_invalid(self) -> None:
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": "{42]"}},
)
err = (
"Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} "
"because the `arguments` is not valid JSON."
)
with pytest.raises(OutputParserException, match=err):
_parse_ai_message(msg)
|
import json
import pytest
from langchain_core.agents import AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.openai_functions_multi_agent.base import (
_FunctionsAgentAction,
_parse_ai_message,
)
# Test: _parse_ai_message() function.
class TestParseAIMessage:
# Test: Pass Non-AIMessage.
def test_not_an_ai(self) -> None:
err = f"Expected an AI message got {str(SystemMessage)}"
with pytest.raises(TypeError, match=err):
_parse_ai_message(SystemMessage(content="x"))
# Test: Model response (not a function call).
def test_model_response(self) -> None:
msg = AIMessage(content="Model response.")
result = _parse_ai_message(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {"output": "Model response."}
assert result.log == "Model response."
# Test: Model response with a function call.
def test_func_call(self) -> None:
act = json.dumps([{"action_name": "foo", "action": {"param": 42}}])
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": f'{{"actions": {act}}}'}
},
)
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == "foo"
assert action.tool_input == {"param": 42}
assert action.log == (
"\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
)
assert action.message_log == [msg]
# Test: Model response with a function call (old style tools).
def test_func_call_oldstyle(self) -> None:
act = json.dumps([{"action_name": "foo", "action": {"__arg1": "42"}}])
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={
"function_call": {"name": "foo", "arguments": f'{{"actions": {act}}}'}
},
)
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == "foo"
assert action.tool_input == "42"
assert action.log == (
"\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n"
)
assert action.message_log == [msg]
# Test: Invalid function call args.
def test_func_call_invalid(self) -> None:
msg = AIMessage(
content="LLM thoughts.",
additional_kwargs={"function_call": {"name": "foo", "arguments": "{42]"}},
)
err = (
"Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} "
"because the `arguments` is not valid JSON."
)
with pytest.raises(OutputParserException, match=err):
_parse_ai_message(msg)
|
import datetime
import prisma.fields
import prisma.models
import pytest
import backend.server.v2.library.model as library_model
from backend.util import json
@pytest.mark.asyncio
async def test_agent_preset_from_db():
# Create mock DB agent
db_agent = prisma.models.AgentPreset(
id="test-agent-123",
createdAt=datetime.datetime.now(),
updatedAt=datetime.datetime.now(),
agentId="agent-123",
agentVersion=1,
name="Test Agent",
description="Test agent description",
isActive=True,
userId="test-user-123",
isDeleted=False,
InputPresets=[
prisma.models.AgentNodeExecutionInputOutput(
id="input-123",
time=datetime.datetime.now(),
name="input1",
data=json.dumps({"type": "string", "value": "test value"}), # type: ignore
)
],
)
# Convert to LibraryAgentPreset
agent = library_model.LibraryAgentPreset.from_db(db_agent)
assert agent.id == "test-agent-123"
assert agent.agent_version == 1
assert agent.is_active is True
assert agent.name == "Test Agent"
assert agent.description == "Test agent description"
assert agent.inputs == {"input1": {"type": "string", "value": "test value"}}
|
import datetime
import prisma.fields
import prisma.models
import backend.server.v2.library.model as library_model
def test_agent_preset_from_db():
# Create mock DB agent
db_agent = prisma.models.AgentPreset(
id="test-agent-123",
createdAt=datetime.datetime.now(),
updatedAt=datetime.datetime.now(),
agentId="agent-123",
agentVersion=1,
name="Test Agent",
description="Test agent description",
isActive=True,
userId="test-user-123",
isDeleted=False,
InputPresets=[
prisma.models.AgentNodeExecutionInputOutput(
id="input-123",
time=datetime.datetime.now(),
name="input1",
data=prisma.fields.Json({"type": "string", "value": "test value"}),
)
],
)
# Convert to LibraryAgentPreset
agent = library_model.LibraryAgentPreset.from_db(db_agent)
assert agent.id == "test-agent-123"
assert agent.agent_version == 1
assert agent.is_active is True
assert agent.name == "Test Agent"
assert agent.description == "Test agent description"
assert agent.inputs == {"input1": {"type": "string", "value": "test value"}}
|
"""Standard LangChain interface tests"""
from pathlib import Path
from typing import Literal, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_anthropic import ChatAnthropic
REPO_ROOT_DIR = Path(__file__).parents[5]
class TestAnthropicStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-5-sonnet-latest"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_pdf_inputs(self) -> bool:
return True
@property
def supports_image_tool_message(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def enable_vcr_tests(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {
"invoke": ["cache_read_input", "cache_creation_input"],
"stream": ["cache_read_input", "cache_creation_input"],
}
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
# invoke twice so first invocation is cached
_invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
"""Standard LangChain interface tests"""
from pathlib import Path
from typing import Literal, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_anthropic import ChatAnthropic
REPO_ROOT_DIR = Path(__file__).parents[5]
class TestAnthropicStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-5-sonnet-latest"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_pdf_inputs(self) -> bool:
return True
@property
def supports_image_tool_message(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {
"invoke": ["cache_read_input", "cache_creation_input"],
"stream": ["cache_read_input", "cache_creation_input"],
}
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
# invoke twice so first invocation is cached
_invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Union,
Optional,
Dict,
)
from qdrant_client.http.models.models import Distance
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
if TYPE_CHECKING: # pragma: no cover
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self, q: 'QdrantArrayType', limit: int = 10, filter: Optional[Dict] = None
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: query input in a format supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=filter,
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docarray.jina.ai/advanced/document-store/qdrant/#qdrant
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
return self._find_with_filter(filter, limit=limit)
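if __name__ == '__main__':
    # Hedged sketch (not part of the upstream module): a concrete class must
    # supply the abstract properties, typically by wrapping a running Qdrant
    # collection. All names below are illustrative and no server is contacted.
    class _DemoQdrantFinder(FindMixin):
        def __init__(self, client: 'QdrantClient', collection_name: str):
            self._client = client
            self._collection_name = collection_name

        @property
        def client(self) -> 'QdrantClient':
            return self._client

        @property
        def collection_name(self) -> str:
            return self._collection_name

        @property
        def serialize_config(self) -> dict:
            return {}

        @property
        def distance(self) -> 'Distance':
            return Distance.COSINE

        def _map_embedding(self, q):
            # the real backend converts ndarray/tensor inputs into the
            # list-of-floats format expected by the Qdrant client
            return q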
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Union,
Optional,
Dict,
)
from qdrant_client.http.models.models import Distance
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self, q: 'QdrantArrayType', limit: int = 10, filter: Optional[Dict] = None
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: query input in a format supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=filter,
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docarray.jina.ai/advanced/document-store/qdrant/#qdrant
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
return self._find_with_filter(filter, limit=limit)
|
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expects a tab separated file with the first & second column the sentence pair and third column the score (0...1). Default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
filepath, encoding="utf-8"
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
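# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# The readers above only store the folder on construction; reading requires an
# actual STS file. 'datasets/stsbenchmark' and 'sts-test.csv' are illustrative
# names, not files shipped with this module.
sts_reader = STSBenchmarkDataReader('datasets/stsbenchmark')
# examples = sts_reader.get_examples('sts-test.csv', max_examples=100)
# for ex in examples[:3]:
#     print(ex.texts, ex.label)  # sentence pair and its normalized 0...1 score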
|
from . import InputExample
import csv
import gzip
import os
class STSDataReader:
"""
Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
    Default values expect a tab-separated file with the first and second columns containing the sentence pair and the third column containing the score (0...1). The default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""
        filename specifies which data split to use (train.csv, dev.csv, test.csv).
"""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
filepath, encoding="utf-8"
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""
    Reader specifically for the STS benchmark dataset. There, the sentences are in columns 5 and 6, and the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
|
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url
from .vision import VisionDataset
class SEMEION(VisionDataset):
r"""`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset.
Args:
        root (str or ``pathlib.Path``): Root directory of the dataset where the
            ``semeion.data`` file exists or will be downloaded to.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
filename = "semeion.data"
md5_checksum = "cb545d371d2ce14ec121470795a77432"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
fp = os.path.join(self.root, self.filename)
data = np.loadtxt(fp)
        # scale the 0/1 pixel values to 8-bit unsigned integers (white = 255)
self.data = (data[:, :256] * 255).astype("uint8")
self.data = np.reshape(self.data, (-1, 16, 16))
self.labels = np.nonzero(data[:, 256:])[1]
def __getitem__(self, index: int) -> tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
root = self.root
download_url(self.url, root, self.filename, self.md5_checksum)
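# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Fetches the small ``semeion.data`` file into './data' (an illustrative root)
# and reads one 16x16 grayscale digit; kept commented because the first use
# needs network access.
# semeion = SEMEION(root='./data', download=True)
# img, target = semeion[0]   # img: 16x16 PIL Image (mode 'L'), target: digit class
# print(len(semeion), img.size, target)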
|
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url
from .vision import VisionDataset
class SEMEION(VisionDataset):
r"""`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset.
Args:
        root (str or ``pathlib.Path``): Root directory of the dataset where the
            ``semeion.data`` file exists or will be downloaded to.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data"
filename = "semeion.data"
md5_checksum = "cb545d371d2ce14ec121470795a77432"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
fp = os.path.join(self.root, self.filename)
data = np.loadtxt(fp)
        # scale the 0/1 pixel values to 8-bit unsigned integers (white = 255)
self.data = (data[:, :256] * 255).astype("uint8")
self.data = np.reshape(self.data, (-1, 16, 16))
self.labels = np.nonzero(data[:, 256:])[1]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode="L")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
root = self.root
download_url(self.url, root, self.filename, self.md5_checksum)
|
import os
import pytest
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = "foo"
def test_openai_model_param() -> None:
llm = OpenAI(model="foo")
assert llm.model_name == "foo"
llm = OpenAI(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
# Test standard tracing params
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "openai",
"ls_model_type": "llm",
"ls_model_name": "foo",
"ls_temperature": 0.7,
"ls_max_tokens": 256,
}
def test_openai_model_kwargs() -> None:
llm = OpenAI(model_kwargs={"foo": "bar"})
assert llm.model_kwargs == {"foo": "bar"}
def test_openai_fields_in_model_kwargs() -> None:
"""Test that for backwards compatibility fields can be passed in as model_kwargs."""
llm = OpenAI(model_kwargs={"model_name": "foo"})
assert llm.model_name == "foo"
llm = OpenAI(model_kwargs={"model": "foo"})
assert llm.model_name == "foo"
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAI(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
"object": "text_completion",
"created": 1689989000,
"model": "text-davinci-003",
"choices": [
{"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
],
"usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
}
@pytest.mark.parametrize("model", ["gpt-3.5-turbo-instruct"])
def test_get_token_ids(model: str) -> None:
OpenAI(model=model).get_token_ids("foo")
return
def test_custom_token_counting() -> None:
def token_encoder(text: str) -> list[int]:
return [1, 2, 3]
llm = OpenAI(custom_get_token_ids=token_encoder)
assert llm.get_token_ids("foo") == [1, 2, 3]
|
import os
from typing import List
import pytest
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = "foo"
def test_openai_model_param() -> None:
llm = OpenAI(model="foo")
assert llm.model_name == "foo"
llm = OpenAI(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
# Test standard tracing params
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "openai",
"ls_model_type": "llm",
"ls_model_name": "foo",
"ls_temperature": 0.7,
"ls_max_tokens": 256,
}
def test_openai_model_kwargs() -> None:
llm = OpenAI(model_kwargs={"foo": "bar"})
assert llm.model_kwargs == {"foo": "bar"}
def test_openai_fields_in_model_kwargs() -> None:
"""Test that for backwards compatibility fields can be passed in as model_kwargs."""
llm = OpenAI(model_kwargs={"model_name": "foo"})
assert llm.model_name == "foo"
llm = OpenAI(model_kwargs={"model": "foo"})
assert llm.model_name == "foo"
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAI(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
"object": "text_completion",
"created": 1689989000,
"model": "text-davinci-003",
"choices": [
{"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
],
"usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
}
@pytest.mark.parametrize("model", ["gpt-3.5-turbo-instruct"])
def test_get_token_ids(model: str) -> None:
OpenAI(model=model).get_token_ids("foo")
return
def test_custom_token_counting() -> None:
def token_encoder(text: str) -> List[int]:
return [1, 2, 3]
llm = OpenAI(custom_get_token_ids=token_encoder)
assert llm.get_token_ids("foo") == [1, 2, 3]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes',
'evaluateImgLists'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes'
]
|
import os
import yaml
from jina.serve.runtimes.gateway.gateway import BaseGateway, Gateway
from jina.jaml import JAML
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def shutdown(self):
pass
def test_cls_from_tag():
assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('BaseGateway') == BaseGateway
assert JAML.cls_from_tag('Nonexisting') is None
def test_base_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
g = BaseGateway.load_config('Gateway', runtime_args={'port': [12345]})
g.save_config(gateway_path)
with open(gateway_path, 'r', encoding='utf-8') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'Gateway'
assert (
type(BaseGateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== Gateway
)
def test_custom_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
e = BaseGateway.load_config('MyDummyGateway', runtime_args={'port': [12345]})
print(f' e {type(e)} => {e.__dict__}')
e.save_config(gateway_path)
with open(gateway_path, 'r', encoding='utf-8') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'MyDummyGateway'
assert (
type(BaseGateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== MyDummyGateway
)
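# --- Hedged sketch (editor's addition, not part of the original test file) ---
# The YAML written by ``save_config`` is expected to look roughly like the
# snippet below; only the ``jtype`` key is asserted above, and the remaining
# keys depend on the gateway's runtime args.
#
#   jtype: MyDummyGateway
#   with: {}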
|
import os
import yaml
from jina.serve.runtimes.gateway.gateway import BaseGateway, Gateway
from jina.jaml import JAML
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def shutdown(self):
pass
def test_cls_from_tag():
assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('BaseGateway') == BaseGateway
assert JAML.cls_from_tag('Nonexisting') is None
def test_base_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
g = BaseGateway.load_config('Gateway', runtime_args={'port': [12345]})
g.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'Gateway'
assert (
type(BaseGateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== Gateway
)
def test_custom_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
e = BaseGateway.load_config('MyDummyGateway', runtime_args={'port': [12345]})
print(f' e {type(e)} => {e.__dict__}')
e.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'MyDummyGateway'
assert (
type(BaseGateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== MyDummyGateway
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred: Tensor, target: Tensor) -> Tensor:
"""A Wrapper of MSE loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
Returns:
Tensor: loss Tensor
"""
return F.mse_loss(pred, target, reduction='none')
@MODELS.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function of loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
weight (Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: The calculated loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
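# --- Hedged sanity check (editor's addition, not part of the original file) ---
# With the defaults (reduction='mean', loss_weight=1.0) and no per-element
# weights, the module above reduces to plain F.mse_loss.
import torch

_criterion = MSELoss()
_pred, _target = torch.rand(4, 2), torch.rand(4, 2)
assert torch.allclose(_criterion(_pred, _target), F.mse_loss(_pred, _target))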
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
"""Wrapper of mse loss."""
return F.mse_loss(pred, target, reduction='none')
@MODELS.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
|
_base_ = './retinanet_r50-caffe_fpn_ms-3x_coco.py'
# model settings
model = dict(
pretrained='open-mmlab://detectron2/resnet101_caffe',
backbone=dict(depth=101))
|
_base_ = './retinanet_r50_caffe_fpn_mstrain_3x_coco.py'
# model settings
model = dict(
pretrained='open-mmlab://detectron2/resnet101_caffe',
backbone=dict(depth=101))
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import pytest
from jina import Executor
from ...minranker import MinRanker
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.metric == 'cosine'
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
def test_ranker(documents_chunk, documents_chunk_chunk, default_traversal_paths):
ranker = MinRanker(metric='cosine', default_traversal_paths=default_traversal_paths)
if default_traversal_paths == ['r']:
ranking_docs = documents_chunk
else:
ranking_docs = documents_chunk_chunk
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from ...minranker import MinRanker
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
def test_ranker(documents_chunk, documents_chunk_chunk, default_traversal_paths):
ranker = MinRanker(metric='cosine', default_traversal_paths=default_traversal_paths)
if default_traversal_paths == ['r']:
ranking_docs = documents_chunk
else:
ranking_docs = documents_chunk_chunk
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
|
import os
import urllib.error
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import TextUrl
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from tests import TOYDATA_DIR
REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TEXT_FILES = [
str(TOYDATA_DIR / 'penal_colony.txt'),
str(TOYDATA_DIR / 'test.md'),
str(TOYDATA_DIR / 'test.html'),
str(TOYDATA_DIR / 'test.css'),
str(TOYDATA_DIR / 'test.csv'),
str(TOYDATA_DIR / 'test.log'),
]
LOCAL_TEXT_FILES_AND_BEGINNING = [
(str(TOYDATA_DIR / 'penal_colony.txt'), '“It’s a peculiar apparatus,”'),
(str(TOYDATA_DIR / 'test.md'), "# Hello"),
(str(TOYDATA_DIR / 'test.html'), "<html>"),
(str(TOYDATA_DIR / 'test.css'), 'body {'),
(str(TOYDATA_DIR / 'test.csv'), "John,Doe"),
(str(TOYDATA_DIR / 'test.log'), "2022-11-25 12:34:56 INFO: Program started"),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TEXT_FILE, '<!DOCTYPE html>'), *LOCAL_TEXT_FILES_AND_BEGINNING],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_proto_text_url(url):
uri = parse_obj_as(TextUrl, url)
proto = uri._to_node_protobuf()
assert 'text_url' in str(proto)
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
@pytest.mark.internet
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES],
)
def test_validation(path_to_file):
url = parse_obj_as(TextUrl, path_to_file)
assert isinstance(url, TextUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'file_type, file_source',
[
*[(TEXT_MIMETYPE, file) for file in LOCAL_TEXT_FILES],
(TEXT_MIMETYPE, REMOTE_TEXT_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(VIDEO_MIMETYPE, os.path.join(TOYDATA_DIR, 'mov_bbb.mp4')),
(OBJ_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.glb')),
],
)
def test_file_validation(file_type, file_source):
if file_type != TextUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(TextUrl, file_source)
else:
parse_obj_as(TextUrl, file_source)
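# --- Hedged usage sketch (editor's addition, not part of the original test file) ---
# The core TextUrl round trip exercised above, on one of the local toy files
# (no network needed); kept commented since it relies on the repo's test data.
# local_url = parse_obj_as(TextUrl, str(TOYDATA_DIR / 'test.md'))
# assert local_url.load().startswith('# Hello')
# assert isinstance(local_url.load_bytes(), bytes)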
|
import os
import urllib.error
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import TextUrl
from tests import TOYDATA_DIR
REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TEXT_FILES = [
str(TOYDATA_DIR / 'penal_colony.txt'),
str(TOYDATA_DIR / 'test.md'),
str(TOYDATA_DIR / 'test.html'),
str(TOYDATA_DIR / 'test.css'),
str(TOYDATA_DIR / 'test.csv'),
str(TOYDATA_DIR / 'test.log'),
]
LOCAL_TEXT_FILES_AND_BEGINNING = [
(str(TOYDATA_DIR / 'penal_colony.txt'), '“It’s a peculiar apparatus,”'),
(str(TOYDATA_DIR / 'test.md'), "# Hello"),
(str(TOYDATA_DIR / 'test.html'), "<html>"),
(str(TOYDATA_DIR / 'test.css'), 'body {'),
(str(TOYDATA_DIR / 'test.csv'), "John,Doe"),
(str(TOYDATA_DIR / 'test.log'), "2022-11-25 12:34:56 INFO: Program started"),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TEXT_FILE, '<!DOCTYPE html>'), *LOCAL_TEXT_FILES_AND_BEGINNING],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_proto_text_url(url):
uri = parse_obj_as(TextUrl, url)
proto = uri._to_node_protobuf()
assert 'text_url' in str(proto)
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
@pytest.mark.internet
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES],
)
def test_validation(path_to_file):
url = parse_obj_as(TextUrl, path_to_file)
assert isinstance(url, TextUrl)
assert isinstance(url, str)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
trained_index_file = os.path.join(tmpdir, 'faiss.index')
train_data = np.array(np.random.random([512, 10]), dtype=np.float32)
index_docs = _get_docs_from_vecs(train_data)
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF6,PQ2',
'trained_index_file': trained_index_file,
},
uses_meta=metas,
)
with f:
import faiss
faiss_index = faiss.index_factory(10, 'IVF6,PQ2', faiss.METRIC_INNER_PRODUCT)
faiss.normalize_L2(train_data)
faiss_index.train(train_data)
faiss.write_index(faiss_index, trained_index_file)
# train and index docs first
f.post(on='/index', data=index_docs)
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'limit': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
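# --- Hedged sketch (editor's addition, not part of the original test file) ---
# Roughly what a Faiss-backed search does with the trained index above, using
# the raw faiss API: load the trained index, add the normalized vectors, then
# take the top-4 inner-product neighbours per query. Kept commented because
# these variables live inside test_train_and_index.
# faiss_index = faiss.read_index(trained_index_file)
# faiss_index.add(train_data)            # train_data was already L2-normalized
# faiss.normalize_L2(query)
# distances, ids = faiss_index.search(query, 4)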
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
trained_index_file = os.path.join(tmpdir, 'faiss.index')
train_data = np.array(np.random.random([512, 10]), dtype=np.float32)
index_docs = _get_docs_from_vecs(train_data)
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF6,PQ2',
'trained_index_file': trained_index_file,
},
uses_meta=metas,
)
with f:
import faiss
faiss_index = faiss.index_factory(10, 'IVF6,PQ2', faiss.METRIC_INNER_PRODUCT)
faiss.normalize_L2(train_data)
faiss_index.train(train_data)
faiss.write_index(faiss_index, trained_index_file)
# train and index docs first
f.post(on='/index', data=index_docs)
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOX(SingleStageDetector):
r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021
<https://arxiv.org/abs/2107.08430>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOX(SingleStageDetector):
r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021
<https://arxiv.org/abs/2107.08430>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
"""
This tool allows agents to interact with the pygithub library
and operate on a GitHub repository.
To use this tool, you must first set as environment variables:
GITHUB_API_TOKEN
GITHUB_REPOSITORY -> format: {owner}/{repo}
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.github import GitHubAPIWrapper
class GitHubAction(BaseTool):
"""Tool for interacting with the GitHub API."""
api_wrapper: GitHubAPIWrapper = Field(default_factory=GitHubAPIWrapper)
mode: str
name: str = ""
description: str = ""
args_schema: Optional[Type[BaseModel]] = None
def _run(
self,
instructions: Optional[str] = "",
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the GitHub API to run an operation."""
if not instructions or instructions == "{}":
# Catch other forms of empty input that GPT-4 likes to send.
instructions = ""
if self.args_schema is not None:
field_names = list(self.args_schema.schema()["properties"].keys())
if len(field_names) > 1:
raise AssertionError(
f"Expected one argument in tool schema, got {field_names}."
)
if field_names:
field = field_names[0]
else:
field = ""
query = str(kwargs.get(field, ""))
else:
query = instructions
return self.api_wrapper.run(self.mode, query)
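# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# How a single action tool might be wired up by hand; "get_issues" is assumed
# to be one of the modes understood by GitHubAPIWrapper.run(). Kept commented
# because the wrapper validates the GitHub environment variables listed in the
# module docstring when it is constructed.
# wrapper = GitHubAPIWrapper()
# issues_tool = GitHubAction(
#     api_wrapper=wrapper,
#     mode="get_issues",
#     name="Get Issues",
#     description="Fetch the open issues of the configured repository.",
# )
# print(issues_tool.run(""))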
|
"""
This tool allows agents to interact with the pygithub library
and operate on a GitHub repository.
To use this tool, you must first set as environment variables:
GITHUB_API_TOKEN
GITHUB_REPOSITORY -> format: {owner}/{repo}
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.github import GitHubAPIWrapper
class GitHubAction(BaseTool): # type: ignore[override]
"""Tool for interacting with the GitHub API."""
api_wrapper: GitHubAPIWrapper = Field(default_factory=GitHubAPIWrapper) # type: ignore[arg-type]
mode: str
name: str = ""
description: str = ""
args_schema: Optional[Type[BaseModel]] = None
def _run(
self,
instructions: Optional[str] = "",
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the GitHub API to run an operation."""
if not instructions or instructions == "{}":
# Catch other forms of empty input that GPT-4 likes to send.
instructions = ""
if self.args_schema is not None:
field_names = list(self.args_schema.schema()["properties"].keys())
if len(field_names) > 1:
raise AssertionError(
f"Expected one argument in tool schema, got {field_names}."
)
if field_names:
field = field_names[0]
else:
field = ""
query = str(kwargs.get(field, ""))
else:
query = instructions
return self.api_wrapper.run(self.mode, query)
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
from .. import __version__
deprecated_kwargs = take_from
values = ()
if not isinstance(args[0], tuple):
args = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
raise ValueError(
f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
f" version {__version__} is >= {version_name}"
)
warning = None
if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(attribute),)
warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(deprecated_kwargs, attribute):
values += (getattr(deprecated_kwargs, attribute),)
warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
warning = warning + " " if standard_warn else ""
warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
call_frame = inspect.getouterframes(inspect.currentframe())[1]
filename = call_frame.filename
line_number = call_frame.lineno
function = call_frame.function
key, value = next(iter(deprecated_kwargs.items()))
raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")
if len(values) == 0:
return
elif len(values) == 1:
return values[0]
return values
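# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Typical call pattern inferred from the implementation above: warn about a
# renamed keyword argument and recover its value from **kwargs. The argument
# names and the version number are purely illustrative.
# def resize(image, *, size=None, **kwargs):
#     width = deprecate("width", "9.9.9", "Use `size` instead.", take_from=kwargs)
#     size = size if size is not None else width
#     ...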
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
from .. import __version__
deprecated_kwargs = take_from
values = ()
if not isinstance(args[0], tuple):
args = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
raise ValueError(
f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
f" version {__version__} is >= {version_name}"
)
warning = None
if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(attribute),)
warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(deprecated_kwargs, attribute):
values += (getattr(deprecated_kwargs, attribute),)
warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
warning = warning + " " if standard_warn else ""
warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
call_frame = inspect.getouterframes(inspect.currentframe())[1]
filename = call_frame.filename
line_number = call_frame.lineno
function = call_frame.function
key, value = next(iter(deprecated_kwargs.items()))
raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
if len(values) == 0:
return
elif len(values) == 1:
return values[0]
return values
|
_base_ = [
'../_base_/models/fast-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
|
_base_ = [
'../_base_/models/fast_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
|
import numpy as np
from docarray.document import AnyDocument, BaseDocument
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDocument):
text: str
tensor: NdArray
class CustomDoc(BaseDocument):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDocument(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
|
import numpy as np
from docarray.document import AnyDocument, BaseDocument
from docarray.typing import Tensor
def test_any_doc():
class InnerDocument(BaseDocument):
text: str
tensor: Tensor
class CustomDoc(BaseDocument):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDocument(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.mlflow_callback import (
MlflowCallbackHandler,
MlflowLogger,
analyze_text,
construct_html_from_prompt_and_generation,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"analyze_text": "langchain_community.callbacks.mlflow_callback",
"construct_html_from_prompt_and_generation": (
"langchain_community.callbacks.mlflow_callback"
),
"MlflowLogger": "langchain_community.callbacks.mlflow_callback",
"MlflowCallbackHandler": "langchain_community.callbacks.mlflow_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MlflowCallbackHandler",
"MlflowLogger",
"analyze_text",
"construct_html_from_prompt_and_generation",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.mlflow_callback import (
MlflowCallbackHandler,
MlflowLogger,
analyze_text,
construct_html_from_prompt_and_generation,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"analyze_text": "langchain_community.callbacks.mlflow_callback",
"construct_html_from_prompt_and_generation": (
"langchain_community.callbacks.mlflow_callback"
),
"MlflowLogger": "langchain_community.callbacks.mlflow_callback",
"MlflowCallbackHandler": "langchain_community.callbacks.mlflow_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"analyze_text",
"construct_html_from_prompt_and_generation",
"MlflowLogger",
"MlflowCallbackHandler",
]
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
class CreateModelCardTest(unittest.TestCase):
def test_generate_model_card_with_library_name(self):
with TemporaryDirectory() as tmpdir:
file_path = Path(tmpdir) / "README.md"
file_path.write_text("---\nlibrary_name: foo\n---\nContent\n")
model_card = load_or_create_model_card(file_path)
populate_model_card(model_card)
assert model_card.data.library_name == "foo"
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
class CreateModelCardTest(unittest.TestCase):
def test_generate_model_card_with_library_name(self):
with TemporaryDirectory() as tmpdir:
file_path = Path(tmpdir) / "README.md"
file_path.write_text("---\nlibrary_name: foo\n---\nContent\n")
model_card = load_or_create_model_card(file_path)
populate_model_card(model_card)
assert model_card.data.library_name == "foo"
|
"""
This script trains a Masked Language Model (MLM). You provide a training file; each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
import gzip
import sys
from datetime import datetime
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
Trainer,
TrainingArguments,
)
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False # Set to True, if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, "rt", encoding="utf8") if train_path.endswith(".gz") else open(
train_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, "rt", encoding="utf8") if dev_path.endswith(".gz") else open(
dev_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on the fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
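# --- Hedged sketch (editor's addition, not part of the original script) ---
# What a collated batch roughly looks like: the collator masks about
# `mlm_prob` of the (whole-word) tokens and copies the original ids into
# `labels`, with -100 at unmasked positions so they are ignored by the loss.
# batch = data_collator([train_dataset[i] for i in range(4)])
# print(batch["input_ids"].shape, batch["labels"].shape)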
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
"""
This script trains a Masked Language Model (MLM). You provide a training file; each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
from transformers import AutoModelForMaskedLM, AutoTokenizer
from transformers import DataCollatorForLanguageModeling, DataCollatorForWholeWordMask
from transformers import Trainer, TrainingArguments
import sys
import gzip
from datetime import datetime
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False # Set to True, if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, "rt", encoding="utf8") if train_path.endswith(".gz") else open(
train_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, "rt", encoding="utf8") if dev_path.endswith(".gz") else open(
dev_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on the fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .profiler_hook import NPUProfilerHook, ProfilerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
from .test_time_aug_hook import PrepareTTAHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook', 'ProfilerHook',
'NPUProfilerHook', 'PrepareTTAHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .profiler_hook import ProfilerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
from .test_time_aug_hook import PrepareTTAHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook', 'ProfilerHook',
'PrepareTTAHook'
]
|
"""
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train", trust_remote_code=True)
corpus = dataset["answer"]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* :cite:`fluent` Dataset
Args:
root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
if subset not in ["train", "valid", "test"]:
raise ValueError("`subset` must be one of ['train', 'valid', 'test']")
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def __len__(self):
return len(self.data)
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, str, str, str, str):
``(waveform, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
wav_path = os.path.join(self._path, "wavs", "speakers", speaker_id, f"{file_name}.wav")
wav, sample_rate = torchaudio.load(wav_path)
return wav, sample_rate, file_name, speaker_id, transcription, action, obj, location
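# A minimal usage sketch (illustration only; assumes the Fluent Speech Commands data has
# been downloaded and extracted under ``root``):
#
#     dataset = FluentSpeechCommands(root="path/to/data", subset="valid")
#     waveform, sample_rate, file_name, speaker_id, transcription, action, obj, location = dataset[0]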
|
import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* [:footcite:`fluent`] Dataset
Args:
root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
if subset not in ["train", "valid", "test"]:
raise ValueError("`subset` must be one of ['train', 'valid', 'test']")
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def __len__(self):
return len(self.data)
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, str, str, str, str):
``(waveform, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
wav_path = os.path.join(self._path, "wavs", "speakers", speaker_id, f"{file_name}.wav")
wav, sample_rate = torchaudio.load(wav_path)
return wav, sample_rate, file_name, speaker_id, transcription, action, obj, location
|
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mahalanobis metric."""
import numpy as np
import datasets
_DESCRIPTION = """
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_CITATION = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_KWARGS_DESCRIPTION = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
}
),
)
def _compute(self, X, reference_distribution):
# convert to numpy arrays
X = np.array(X)
reference_distribution = np.array(reference_distribution)
# Assert that arrays are 2D
if len(X.shape) != 2:
raise ValueError("Expected `X` to be a 2D vector")
if len(reference_distribution.shape) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector")
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
)
# Get mahalanobis distance for each prediction
X_minus_mu = X - np.mean(reference_distribution)
cov = np.cov(reference_distribution.T)
try:
inv_covmat = np.linalg.inv(cov)
except np.linalg.LinAlgError:
inv_covmat = np.linalg.pinv(cov)
left_term = np.dot(X_minus_mu, inv_covmat)
mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
|
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mahalanobis metric."""
import numpy as np
import datasets
_DESCRIPTION = """
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_CITATION = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_KWARGS_DESCRIPTION = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
}
),
)
def _compute(self, X, reference_distribution):
# convert to numpy arrays
X = np.array(X)
reference_distribution = np.array(reference_distribution)
# Assert that arrays are 2D
if len(X.shape) != 2:
raise ValueError("Expected `X` to be a 2D vector")
if len(reference_distribution.shape) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector")
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
)
# Get mahalanobis distance for each prediction
X_minus_mu = X - np.mean(reference_distribution)
cov = np.cov(reference_distribution.T)
try:
inv_covmat = np.linalg.inv(cov)
except np.linalg.LinAlgError:
inv_covmat = np.linalg.pinv(cov)
left_term = np.dot(X_minus_mu, inv_covmat)
mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
|
# NOTE:
# The entire `torchaudio.backend` module is deprecated.
# New things should be added to `torchaudio._backend`.
# Only things related to backward compatibility should be placed here.
from . import common, no_backend, soundfile_backend, sox_io_backend # noqa
__all__ = []
|
# NOTE:
# The entire `torchaudio.backend` module is deprecated.
# New things should be added to `torchaudio._backend`.
# Only things related to backward compatibility should be placed here.
def __getattr__(name: str):
if name == "common":
from . import _common
return _common
if name in ["no_backend", "sox_io_backend", "soundfile_backend"]:
import warnings
warnings.warn(
"Torchaudio's I/O functions now support par-call bakcend dispatch. "
"Importing backend implementation directly is no longer guaranteed to work. "
"Please use `backend` keyword with load/save/info function, instead of "
"calling the udnerlying implementation directly.",
stacklevel=2,
)
if name == "sox_io_backend":
from . import _sox_io_backend
return _sox_io_backend
if name == "soundfile_backend":
from torchaudio._backend import soundfile_backend
return soundfile_backend
if name == "no_backend":
from . import _no_backend
return _no_backend
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
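# For reference, a minimal sketch of the recommended per-call backend usage mentioned in
# the warning above (assumes an existing "audio.wav" and an installed soundfile backend):
#
#     import torchaudio
#     waveform, sample_rate = torchaudio.load("audio.wav", backend="soundfile")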
|
"""
==================================
Getting started with transforms v2
==================================
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_transforms_v2.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_transforms_v2.py>` to download the full example code.
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This
example showcases the core functionality of the new ``torchvision.transforms.v2`` API.
"""
import pathlib
import torch
def load_data():
from torchvision.io import read_image
from torchvision import datapoints
from torchvision.ops import masks_to_boxes
assets_directory = pathlib.Path("../assets")
path = assets_directory / "FudanPed00054.png"
image = datapoints.Image(read_image(str(path)))
merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png"))
labels = torch.unique(merged_masks)[1:]
masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1))
bounding_boxes = datapoints.BoundingBoxes(
masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, canvas_size=image.shape[-2:]
)
return path, image, bounding_boxes, masks, labels
# %%
# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation
# masks. Thus, it offers native support for many Computer Vision tasks, like image and video classification, object
# detection or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.
import torchvision.transforms.v2 as transforms
transform = transforms.Compose(
[
transforms.ColorJitter(contrast=0.5),
transforms.RandomRotation(30),
transforms.CenterCrop(480),
]
)
# %%
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.
path, image, bounding_boxes, masks, labels = load_data()
torch.manual_seed(0)
new_image = transform(image) # Image Classification
new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection
new_image, new_bounding_boxes, new_masks, new_labels = transform(
image, bounding_boxes, masks, labels
) # Instance Segmentation
new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure
# %%
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`. Note, however, that as a
# regular user you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2_e2e.py`.
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing to store extra
# information directly with the sample:
sample = {"path": path, "image": image}
new_sample = transform(sample)
assert new_sample["path"] is sample["path"]
# %%
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as image or video if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through.
plain_tensor_image = torch.rand(image.shape)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor together with an explicit image will not transform the former
plain_tensor_image, image = transform(plain_tensor_image, image)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor without an explicit image will transform the former
plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes)
print(image.shape, plain_tensor_image.shape)
|
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This
example showcases the core functionality of the new ``torchvision.transforms.v2`` API.
"""
import pathlib
import torch
def load_data():
from torchvision.io import read_image
from torchvision import datapoints
from torchvision.ops import masks_to_boxes
assets_directory = pathlib.Path("../assets")
path = assets_directory / "FudanPed00054.png"
image = datapoints.Image(read_image(str(path)))
merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png"))
labels = torch.unique(merged_masks)[1:]
masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1))
bounding_boxes = datapoints.BoundingBoxes(
masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, canvas_size=image.shape[-2:]
)
return path, image, bounding_boxes, masks, labels
# %%
# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation
# masks. Thus, it offers native support for many Computer Vision tasks, like image and video classification, object
# detection or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.
import torchvision.transforms.v2 as transforms
transform = transforms.Compose(
[
transforms.ColorJitter(contrast=0.5),
transforms.RandomRotation(30),
transforms.CenterCrop(480),
]
)
# %%
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.
path, image, bounding_boxes, masks, labels = load_data()
torch.manual_seed(0)
new_image = transform(image) # Image Classification
new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection
new_image, new_bounding_boxes, new_masks, new_labels = transform(
image, bounding_boxes, masks, labels
) # Instance Segmentation
new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure
# %%
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`. Note, however, that as a
# regular user you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2_e2e.py`.
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing to store extra
# information directly with the sample:
sample = {"path": path, "image": image}
new_sample = transform(sample)
assert new_sample["path"] is sample["path"]
# %%
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as image or video if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through.
plain_tensor_image = torch.rand(image.shape)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor together with an explicit image will not transform the former
plain_tensor_image, image = transform(plain_tensor_image, image)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor without an explicit image will transform the former
plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes)
print(image.shape, plain_tensor_image.shape)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import pytest
import requests
import weaviate
HOST = "http://localhost:8080"
cur_dir = os.path.dirname(os.path.abspath(__file__))
weaviate_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker-compose -f {weaviate_yml} up -d --remove-orphans")
_wait_for_weaviate()
yield
os.system(f"docker-compose -f {weaviate_yml} down --remove-orphans")
def _wait_for_weaviate():
while True:
try:
response = requests.get(f"{HOST}/v1/.well-known/ready")
if response.status_code == 200:
return
else:
time.sleep(0.5)
except requests.exceptions.ConnectionError:
time.sleep(1)
@pytest.fixture
def weaviate_client(start_storage):
client = weaviate.Client(HOST)
client.schema.delete_all()
yield client
client.schema.delete_all()
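# A minimal sketch of a test that could use the fixture above (illustration only;
# `is_ready()` is part of the weaviate-client v3 API that this file already assumes):
#
#     def test_weaviate_is_ready(weaviate_client):
#         assert weaviate_client.is_ready()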
|
import os
import time
import pytest
import requests
import weaviate
HOST = "http://localhost:8080"
cur_dir = os.path.dirname(os.path.abspath(__file__))
weaviate_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker-compose -f {weaviate_yml} up -d --remove-orphans")
_wait_for_weaviate()
yield
os.system(f"docker-compose -f {weaviate_yml} down --remove-orphans")
def _wait_for_weaviate():
while True:
try:
response = requests.get(f"{HOST}/v1/.well-known/ready")
if response.status_code == 200:
return
else:
time.sleep(0.5)
except requests.exceptions.ConnectionError:
time.sleep(1)
@pytest.fixture
def weaviate_client(start_storage):
client = weaviate.Client(HOST)
client.schema.delete_all()
yield client
client.schema.delete_all()
|
import os
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import SymbolicScope
from keras.api import Variable
from keras.api import __version__
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
# END DO NOT EDIT.
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
import os
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import Variable
from keras.api import __version__
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
# END DO NOT EDIT.
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
AUDIO_FILE_FORMATS = [
'3g2',
'3ga',
'3gp',
'aac',
'ac3',
'act',
'aiff',
'amr',
'ape',
'au',
'awb',
'dct',
'dsf',
'dvf',
'flac',
'gsm',
'iklax',
'ivs',
'm4a',
'm4b',
'm4p',
'mmf',
'mp2',
'mp3',
'mpc',
'msv',
'nsf',
'oga',
'ogg',
'opus',
'ra',
'rm',
'raw',
'sln',
'tta',
'voc',
'vox',
'wav',
'wma',
'wv',
]
TEXT_FILE_FORMATS = [
'txt',
'ics',
'ifb',
'css',
'csv',
'htm',
'html',
'js',
'md',
'markdown',
'mdown',
'markdn',
'mathml',
'mml',
'conf',
'def',
'diff',
'in',
'ksh',
'list',
'log',
'pl',
'text',
'dsc',
'rtx',
'sgm',
'sgml',
'tsv',
'man',
'me',
'ms',
'roff',
't',
'tr',
'uri',
'uris',
'urls',
'curl',
'dcurl',
'mcurl',
'scurl',
'fly',
'flx',
'gv',
'3dml',
'spot',
'jad',
'si',
'sl',
'wml',
'wmls',
'asm',
's',
'c',
'cc',
'cpp',
'cxx',
'dic',
'h',
'hh',
'f',
'f77',
'f90',
'for',
'java',
'p',
'pas',
'pp',
'inc',
'py',
'etx',
'uu',
'vcs',
'vcf',
]
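# A minimal sketch of how these extension lists might be consumed; `guess_file_modality`
# is a hypothetical helper added here for illustration, not part of the original module.
def guess_file_modality(filename: str) -> str:
    """Return 'audio', 'text', or 'unknown' based on the file extension."""
    ext = filename.rsplit('.', 1)[-1].lower()
    if ext in AUDIO_FILE_FORMATS:
        return 'audio'
    if ext in TEXT_FILE_FORMATS:
        return 'text'
    return 'unknown'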
|
AUDIO_FILE_FORMATS = [
'3g2',
'3ga',
'3gp',
'aac',
'ac3',
'act',
'aiff',
'amr',
'ape',
'au',
'awb',
'dct',
'dsf',
'dvf',
'flac',
'gsm',
'iklax',
'ivs',
'm4a',
'm4b',
'm4p',
'mmf',
'mp2',
'mp3',
'mpc',
'msv',
'nsf',
'oga',
'ogg',
'opus',
'ra',
'rm',
'raw',
'sln',
'tta',
'voc',
'vox',
'wav',
'wma',
'wv',
]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import lru_cache
from pathlib import Path
@lru_cache(maxsize=None)
def _get_cache_path() -> Path:
"""
Get the path to the cache directory.
:return: The path to the cache directory.
"""
cache_path = Path.home() / '.cache' / 'docarray'
if "DOCARRAY_CACHE" in os.environ:
cache_path = Path(os.environ["DOCARRAY_CACHE"])
cache_path.mkdir(parents=True, exist_ok=True)
return cache_path
|
import os
from functools import lru_cache
from pathlib import Path
@lru_cache(maxsize=None)
def _get_cache_path() -> Path:
"""
Get the path to the cache directory.
:return: The path to the cache directory.
"""
cache_path = Path.home() / '.cache' / 'docarray'
if "DOCARRAY_CACHE" in os.environ:
cache_path = Path(os.environ["DOCARRAY_CACHE"])
cache_path.mkdir(parents=True, exist_ok=True)
return cache_path
|
from typing import Union
from torch import nn
import transformers
import torch
from PIL import Image
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self):
return "CLIPModel()"
def forward(self, features):
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: Union[str, bool] = True):
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
def save(self, output_path: str):
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str):
return CLIPModel(model_name=input_path)
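# A minimal usage sketch (illustration only; assumes a local "cat.jpg" exists and that the
# pretrained weights can be downloaded):
#
#     model = CLIPModel()
#     features = model.tokenize([Image.open("cat.jpg"), "a photo of a cat"])
#     features = model(features)
#     embeddings = features["sentence_embedding"]  # one row per input, images and texts mixed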
|
from typing import Union
from torch import nn
import transformers
import torch
from PIL import Image
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self):
return "CLIPModel()"
def forward(self, features):
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: Union[str, bool] = True):
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return encoding
def save(self, output_path: str):
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str):
return CLIPModel(model_name=input_path)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.17.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
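# Examples (illustration only): parse_version_info('2.17.0') -> (2, 17, 0),
# parse_version_info('2.17.0rc1') -> (2, 17, 0, 'rc1').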
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.16.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
_base_ = '../dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
|
_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import ImageDoc
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDocument):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert len(img.bytes_) > 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import ImageDoc
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDocument):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes = img.url.load_bytes()
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes = img.tensor.to_bytes()
assert isinstance(img.bytes, bytes)
assert len(img.bytes) > 0
|
from typing import List
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDoc, DocArray
from docarray.base_doc import DocArrayResponse
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDoc):
img: ImageDoc
text: TextDoc
title: str
input_doc = Mmdoc(
img=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(), title='hello'
)
app = FastAPI()
@app.post("/doc/", response_model=Mmdoc, response_class=DocArrayResponse)
async def create_item(doc: Mmdoc) -> Mmdoc:
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocArrayResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
doc = OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDoc):
text: str
class OutputDoc(BaseDoc):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocArrayResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_docarray():
doc = ImageDoc(tensor=np.zeros((3, 224, 224)))
docs = DocArray[ImageDoc]([doc, doc])
app = FastAPI()
@app.post("/doc/", response_class=DocArrayResponse)
async def func(fastapi_docs: List[ImageDoc]) -> List[ImageDoc]:
docarray_docs = DocArray[ImageDoc].construct(fastapi_docs)
return list(docarray_docs)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=docs.to_json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
docs = DocArray[ImageDoc].from_json(response.content.decode())
assert len(docs) == 2
assert docs[0].tensor.shape == (3, 224, 224)
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDoc
from docarray.base_doc import DocResponse
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDoc):
img: ImageDoc
text: TextDoc
title: str
input_doc = Mmdoc(
img=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(), title='hello'
)
app = FastAPI()
@app.post("/doc/", response_model=Mmdoc, response_class=DocResponse)
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
doc = OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDoc):
text: str
class OutputDoc(BaseDoc):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
|