input (string, 33-5k chars) | output (string, 32-5k chars)
---|---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GuardrailsOutputParser",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GuardrailsOutputParser",
]
|
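Both shim variants above rely on a module-level `__getattr__` (PEP 562) to route deprecated names through `create_importer`. Below is a minimal self-contained sketch of that pattern with an illustrative lookup table; it is not langchain's actual implementation of `create_importer`.

```python
# Sketch of the dynamic-lookup pattern used above (illustrative, not langchain's code).
import importlib
import warnings
from typing import Any

_DEPRECATED_LOOKUP = {
    "GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser",
}

def __getattr__(name: str) -> Any:
    """Resolve deprecated attributes lazily and warn on access."""
    if name in _DEPRECATED_LOOKUP:
        new_module = _DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {new_module} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(new_module), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```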
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
            # VOCDataset adds a different `dataset_type` to dataset.metainfo,
            # which raises an error when using ConcatDataset. Adding
            # `ignore_keys` avoids this error.
ignore_keys=['dataset_type'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as its default evaluation mode, while
# Pascal VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
            # VOCDataset adds a different `DATASET_TYPE` to dataset.metainfo,
            # which raises an error when using ConcatDataset. Adding
            # `ignore_keys` avoids this error.
ignore_keys=['DATASET_TYPE'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as its default evaluation mode, while
# Pascal VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
|
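As a rough sketch of how a config like this is consumed, the snippet below loads it with mmengine and builds the validation dataset from the mmdet registry. The config path is assumed, and the VOC data must already sit under `data/VOCdevkit/`.

```python
# Hedged sketch: build the VOC validation dataset from the config above.
from mmengine.config import Config
from mmengine.registry import init_default_scope

from mmdet.registry import DATASETS

cfg = Config.fromfile('configs/_base_/datasets/voc0712.py')  # assumed path
init_default_scope('mmdet')  # make mmdet modules visible to the registry
val_dataset = DATASETS.build(cfg.val_dataloader.dataset)
print(len(val_dataset))
```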
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import os.path as osp
import mmcv
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
assert mmcv.is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
class EvalHook(BaseEvalHook):
def __init__(self, *args, dynamic_intervals=None, **kwargs):
super(EvalHook, self).__init__(*args, **kwargs)
self.use_dynamic_intervals = dynamic_intervals is not None
if self.use_dynamic_intervals:
self.dynamic_milestones, self.dynamic_intervals = \
_calc_dynamic_intervals(self.interval, dynamic_intervals)
def _decide_interval(self, runner):
if self.use_dynamic_intervals:
progress = runner.epoch if self.by_epoch else runner.iter
step = bisect.bisect(self.dynamic_milestones, (progress + 1))
# Dynamically modify the evaluation interval
self.interval = self.dynamic_intervals[step - 1]
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training by epoch."""
self._decide_interval(runner)
super().before_train_epoch(runner)
def before_train_iter(self, runner):
self._decide_interval(runner)
super().before_train_iter(runner)
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self._should_evaluate(runner):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
# Note: Considering that MMCV's EvalHook updated its interface in V1.3.16,
# in order to avoid strong version dependency, we did not directly
# inherit EvalHook but BaseDistEvalHook.
class DistEvalHook(BaseDistEvalHook):
def __init__(self, *args, dynamic_intervals=None, **kwargs):
super(DistEvalHook, self).__init__(*args, **kwargs)
self.use_dynamic_intervals = dynamic_intervals is not None
if self.use_dynamic_intervals:
self.dynamic_milestones, self.dynamic_intervals = \
_calc_dynamic_intervals(self.interval, dynamic_intervals)
def _decide_interval(self, runner):
if self.use_dynamic_intervals:
progress = runner.epoch if self.by_epoch else runner.iter
step = bisect.bisect(self.dynamic_milestones, (progress + 1))
# Dynamically modify the evaluation interval
self.interval = self.dynamic_intervals[step - 1]
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training by epoch."""
self._decide_interval(runner)
super().before_train_epoch(runner)
def before_train_iter(self, runner):
self._decide_interval(runner)
super().before_train_iter(runner)
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffers (running_mean
        # and running_var) is not supported by PyTorch's DDP,
        # which may cause inconsistent model performance across
        # ranks, so we broadcast rank 0's BatchNorm buffers to
        # the other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self._should_evaluate(runner):
return
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
from mmdet.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
class EvalHook(BaseEvalHook):
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self._should_evaluate(runner):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
class DistEvalHook(BaseDistEvalHook):
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffers (running_mean
        # and running_var) is not supported by PyTorch's DDP,
        # which may cause inconsistent model performance across
        # ranks, so we broadcast rank 0's BatchNorm buffers to
        # the other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self._should_evaluate(runner):
return
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
from mmdet.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
|
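The dynamic-interval logic shared by both hooks is easy to check in isolation: milestones mark the progress at which a new evaluation interval takes effect, and `bisect` picks the interval for the current epoch or iteration. A self-contained toy run:

```python
# Toy reproduction of _calc_dynamic_intervals plus the bisect lookup above.
import bisect

start_interval = 5
dynamic_interval_list = [(20, 2), (50, 1)]  # (milestone, new interval)

milestones = [0] + [m for m, _ in dynamic_interval_list]
intervals = [start_interval] + [i for _, i in dynamic_interval_list]

for epoch in (0, 10, 19, 20, 49, 50):
    step = bisect.bisect(milestones, epoch + 1)
    print(epoch, '->', intervals[step - 1])  # prints 5, 5, 2, 2, 1, 1
```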
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor', 'ImageTorchTensor'])
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor'])
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from docarray.document import DocumentSourceType
from docarray.document.mixins.content import DocumentContentType
from docarray import Document
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
    :param data_type: if ``data`` is an iterator over self-contained documents, i.e. :class:`DocumentSourceType`,
        or an iterator over possible Document content (set to text, blob or buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
        # must be handled here, as the grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from docarray.document import DocumentSourceType
from docarray.document.mixins.content import DocumentContentType
from jina import Document
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
    :param data_type: if ``data`` is an iterator over self-contained documents, i.e. :class:`DocumentSourceType`,
        or an iterator over possible Document content (set to text, blob or buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
        # must be handled here, as the grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
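The `request_size` argument is handed to `batch_iterator`, which splits the input into per-request batches. A simplified stand-in for that helper is sketched below; it assumes a positive size, whereas jina substitutes its own default when `request_size` is 0.

```python
# Simplified batching sketch (not jina's actual batch_iterator).
from itertools import islice
from typing import Iterable, Iterator, List, TypeVar

T = TypeVar('T')

def simple_batch_iterator(data: Iterable[T], size: int) -> Iterator[List[T]]:
    """Yield lists of at most `size` items; assumes size > 0."""
    it = iter(data)
    while True:
        batch = list(islice(it, size))
        if not batch:
            return
        yield batch

print(list(simple_batch_iterator(range(5), 2)))  # [[0, 1], [2, 3], [4]]
```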
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conversion script for the LDM checkpoints."""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
args = parser.parse_args()
config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
subfolder = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
text = reader.read()
config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
model = UNet2DModel(**config)
else:
class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
config[value] = config[key]
del config[key]
config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
has_changed = True
if not has_changed:
new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conversion script for the LDM checkpoints."""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
args = parser.parse_args()
config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
subfolder = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
text = reader.read()
config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
model = UNet2DModel(**config)
else:
class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
config[value] = config[key]
del config[key]
config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
has_changed = True
if not has_changed:
new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
|
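The weight-renaming loop in both conversion scripts only rewrites the first dotted component of each parameter key. A toy illustration of that rule on a made-up state dict:

```python
# Toy example of the prefix renaming driven by key_parameters_to_change.
key_map = {"mid": "mid_block", "downsample_blocks": "down_blocks"}
old_state = {"mid.conv.weight": 1, "downsample_blocks.0.bias": 2, "other.weight": 3}

new_state = {}
for key, value in old_state.items():
    head, *rest = key.split(".")  # only the first component is looked up
    new_state[".".join([key_map.get(head, head)] + rest)] = value

print(new_state)
# {'mid_block.conv.weight': 1, 'down_blocks.0.bias': 2, 'other.weight': 3}
```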
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# use FP16
fp16 = dict(loss_scale=512.)
|
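This pair shows the migration from the legacy `fp16 = dict(loss_scale=512.)` switch to mmengine's `AmpOptimWrapper`. As a hedged sketch, a fuller `optim_wrapper` section might look like the following; the optimizer values are illustrative and not taken from the base config.

```python
# Illustrative mmengine-style AMP optimizer wrapper (assumed values).
optim_wrapper = dict(
    type='AmpOptimWrapper',
    loss_scale='dynamic',  # or a fixed float such as 512.0
    optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
```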
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image_tensor(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> datapoints.Image:
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
return datapoints.Image(output)
to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
|
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image_tensor(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
return features.Image(output)
to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
|
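A hedged usage sketch for `to_image_tensor`: an HWC uint8 numpy array (shape and dtype assumed) is converted to a CHW image type, while PIL images and tensors take the other branches.

```python
# Usage sketch: numpy HWC uint8 array -> CHW image wrapper.
import numpy as np

arr = np.zeros((32, 32, 3), dtype=np.uint8)  # H x W x C
img = to_image_tensor(arr)
print(type(img).__name__, tuple(img.shape))  # Image (3, 32, 32)
```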
_base_ = './mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
from keras.src.utils.module_utils import tensorflow as tf
def start_trace(logdir):
tf.profiler.experimental.start(logdir=logdir)
def stop_trace(save):
tf.profiler.experimental.stop(save=save)
def start_batch_trace(batch):
batch_trace_context = tf.profiler.experimental.Trace(
"Profiled batch", step_num=batch
)
batch_trace_context.__enter__()
return batch_trace_context
def stop_batch_trace(batch_trace_context):
batch_trace_context.__exit__(None, None, None)
|
import tensorflow as tf
def start_trace(logdir):
tf.profiler.experimental.start(logdir=logdir)
def stop_trace(save):
tf.profiler.experimental.stop(save=save)
|
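A hedged usage sketch of the tracing helpers above; the log directory is an assumed path, and `start_batch_trace`/`stop_batch_trace` exist only in the first variant.

```python
# Usage sketch: profile one training step between start and stop.
start_trace("/tmp/profile_logs")        # begins a TensorFlow profiler session
batch_ctx = start_batch_trace(batch=0)  # first variant only
# ... run one training step here ...
stop_batch_trace(batch_ctx)
stop_trace(save=True)                   # stop profiling and write the trace
```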
"""Module to test base parser implementations."""
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseTransformOutputParser,
)
from langchain_core.outputs import ChatGeneration, Generation
def test_base_generation_parser() -> None:
"""Test Base Generation Output Parser."""
class StrInvertCase(BaseGenerationOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                    Many parsers assume that only a single generation is passed in to them.
                    We will assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
StrInvertCase.model_rebuild()
model = GenericFakeChatModel(messages=iter([AIMessage(content="hEllo")]))
chain = model | StrInvertCase()
assert chain.invoke("") == "HeLLO"
def test_base_transform_output_parser() -> None:
"""Test base transform output parser."""
class StrInvertCase(BaseTransformOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse(self, text: str) -> str:
"""Parse a single string into a specific format."""
raise NotImplementedError
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                    Many parsers assume that only a single generation is passed in to them.
                    We will assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
chain = model | StrInvertCase()
# inputs to models are ignored, response is hard-coded in model definition
chunks = list(chain.stream(""))
assert chunks == ["HELLO", " ", "WORLD"]
|
"""Module to test base parser implementations."""
from typing import Optional as Optional
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseTransformOutputParser,
)
from langchain_core.outputs import ChatGeneration, Generation
def test_base_generation_parser() -> None:
"""Test Base Generation Output Parser."""
class StrInvertCase(BaseGenerationOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                    Many parsers assume that only a single generation is passed in to them.
                    We will assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
StrInvertCase.model_rebuild()
model = GenericFakeChatModel(messages=iter([AIMessage(content="hEllo")]))
chain = model | StrInvertCase()
assert chain.invoke("") == "HeLLO"
def test_base_transform_output_parser() -> None:
"""Test base transform output parser."""
class StrInvertCase(BaseTransformOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse(self, text: str) -> str:
"""Parse a single string into a specific format."""
raise NotImplementedError
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                    Many parsers assume that only a single generation is passed in to them.
                    We will assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
chain = model | StrInvertCase()
# inputs to models are ignored, response is hard-coded in model definition
chunks = list(chain.stream(""))
assert chunks == ["HELLO", " ", "WORLD"]
|
_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
"""init.py."""
from llama_index.tools.code_interpreter.base import (
CodeInterpreterToolSpec,
)
__all__ = ["CodeInterpreterToolSpec"]
|
"""init.py."""
from llama_index.tools.code_interpreter.base import (
CodeInterpreterToolSpec,
)
__all__ = ["CodeInterpreterToolSpec"]
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
amplitude_to_DB,
apply_beamforming,
apply_codec,
barkscale_fbanks,
compute_deltas,
compute_kaldi_pitch,
create_dct,
DB_to_amplitude,
detect_pitch_frequency,
edit_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"barkscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
]
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
create_dct,
DB_to_amplitude,
detect_pitch_frequency,
edit_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
]
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while maintaining nearly the same performance.
For dimensionality reduction, we compute embeddings for a large set of (representative) sentences. Then,
we use PCA to find e.g. 128 principal components of our vector space. This allows us to maintain
as much information as possible with only 128 dimensions.
PCA gives us a matrix that down-projects vectors to 128 dimensions. We use this matrix
and extend our original SentenceTransformer model with this linear down-projection. Hence,
the new SentenceTransformer model will directly produce embeddings with 128 dimensions
without further changes needed.
"""
from datasets import load_dataset
from sklearn.decomposition import PCA
from sentence_transformers import SentenceTransformer, models
import logging
import random
import numpy as np
import torch
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Model for which we apply dimensionality reduction
model_name = "all-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
# New size for the embeddings
new_dimension = 128
# We measure the performance of the original model
# and later we will measure the performance with the reduced dimension size
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
stsb_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
name="sts-test",
)
logging.info("Original model performance:")
stsb_evaluator(model)
######## Reduce the embedding dimensions ########
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-score", split="train")
nli_sentences = train_dataset["sentence1"] + train_dataset["sentence2"]
random.shuffle(nli_sentences)
# To determine the PCA matrix, we need some example sentence embeddings.
# Here, we compute the embeddings for 20k random sentences from the AllNLI dataset
pca_train_sentences = nli_sentences[0:20000]
train_embeddings = model.encode(pca_train_sentences, convert_to_numpy=True)
# Compute PCA on the train embeddings matrix
pca = PCA(n_components=new_dimension)
pca.fit(train_embeddings)
pca_comp = np.asarray(pca.components_)
# We add a dense layer to the model, so that it will directly produce embeddings with the new size
dense = models.Dense(
in_features=model.get_sentence_embedding_dimension(),
out_features=new_dimension,
bias=False,
activation_function=torch.nn.Identity(),
)
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca_comp))
model.add_module("dense", dense)
# Evaluate the model with the reduced embedding size
logging.info("Model with {} dimensions:".format(new_dimension))
stsb_evaluator(model)
# If you like, you can store the adapted model on disk:
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
model.save(f"{model_name}-128dim")
# You can then load the adapted model that produces 128 dimensional embeddings like this:
# model = SentenceTransformer('models/my-128dim-model')
# Or you can push the model to the Hugging Face Hub
# model.push_to_hub(f'{model_name}-128dim')
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while maintaining nearly the same performance.
For dimensionality reduction, we compute embeddings for a large set of (representative) sentences. Then,
we use PCA to find e.g. 128 principal components of our vector space. This allows us to maintain
as much information as possible with only 128 dimensions.
PCA gives us a matrix that down-projects vectors to 128 dimensions. We use this matrix
and extend our original SentenceTransformer model with this linear down-projection. Hence,
the new SentenceTransformer model will directly produce embeddings with 128 dimensions
without further changes needed.
"""
from sklearn.decomposition import PCA
from sentence_transformers import SentenceTransformer, LoggingHandler, util, evaluation, models, InputExample
import logging
import os
import gzip
import csv
import random
import numpy as np
import torch
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Model for which we apply dimensionality reduction
model = SentenceTransformer("all-MiniLM-L6-v2")
# New size for the embeddings
new_dimension = 128
# We use AllNLI as a source of sentences to compute PCA
nli_dataset_path = "datasets/AllNLI.tsv.gz"
# We use the STS benchmark dataset to see how much performance we lose by using the dimensionality reduction
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# We measure the performance of the original model
# and later we will measure the performance with the reduced dimension size
logger.info("Read STSbenchmark test dataset")
eval_examples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "test":
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
eval_examples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
# Evaluate the original model on the STS benchmark dataset
stsb_evaluator = evaluation.EmbeddingSimilarityEvaluator.from_input_examples(eval_examples, name="sts-benchmark-test")
logger.info("Original model performance:")
stsb_evaluator(model)
######## Reduce the embedding dimensions ########
# Read sentences from NLI dataset
nli_sentences = set()
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
nli_sentences.add(row["sentence1"])
nli_sentences.add(row["sentence2"])
nli_sentences = list(nli_sentences)
random.shuffle(nli_sentences)
# To determine the PCA matrix, we need some example sentence embeddings.
# Here, we compute the embeddings for 20k random sentences from the AllNLI dataset
pca_train_sentences = nli_sentences[0:20000]
train_embeddings = model.encode(pca_train_sentences, convert_to_numpy=True)
# Compute PCA on the train embeddings matrix
pca = PCA(n_components=new_dimension)
pca.fit(train_embeddings)
pca_comp = np.asarray(pca.components_)
# We add a dense layer to the model, so that it will directly produce embeddings with the new size
dense = models.Dense(
in_features=model.get_sentence_embedding_dimension(),
out_features=new_dimension,
bias=False,
activation_function=torch.nn.Identity(),
)
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca_comp))
model.add_module("dense", dense)
# Evaluate the model with the reduced embedding size
logger.info("Model with {} dimensions:".format(new_dimension))
stsb_evaluator(model)
# If you like, you can store the model on disk by uncommenting the following line
# model.save('models/my-128dim-model')
# You can then load the adapted model that produces 128 dimensional embeddings like this:
# model = SentenceTransformer('models/my-128dim-model')
|
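The core idea in the docstring, that `pca.components_` is a `(new_dim, old_dim)` matrix whose transpose down-projects embeddings, can be checked numerically on toy data. The `models.Dense` layer in both scripts is initialized with exactly this matrix (without PCA's mean-centering), so its forward pass performs the same multiplication.

```python
# Numeric sketch of the PCA down-projection used above (toy random data).
import numpy as np
from sklearn.decomposition import PCA

embeddings = np.random.randn(1000, 384).astype(np.float32)  # stand-in sentence embeddings
pca = PCA(n_components=128).fit(embeddings)
projection = np.asarray(pca.components_)   # shape (128, 384)
reduced = embeddings @ projection.T        # shape (1000, 128)
print(projection.shape, reduced.shape)
```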
import dataclasses
from collections import defaultdict
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
from docarray.document.strawberry_type import StrawberryDocument
class StrawberryMixin:
"""Provide helper functions to convert to/from a Strawberry model"""
def to_strawberry_type(self) -> 'StrawberryDocument':
"""Convert a Document object into a Strawberry type."""
from docarray.document.strawberry_type import StrawberryDocument as SD
from docarray.document.strawberry_type import _NameScoreItem, _NamedScore
_p_dict = {}
for f in self.non_empty_fields:
v = getattr(self, f)
if f in ('matches', 'chunks'):
_p_dict[f] = v.to_strawberry_type()
elif f in ('scores', 'evaluations'):
_p_dict[f] = [
_NameScoreItem(name=k, score=_NamedScore(**v.to_dict()))
for k, v in v.items()
]
else:
_p_dict[f] = v
return SD(**_p_dict)
@classmethod
def from_strawberry_type(cls: Type['T'], model) -> 'T':
"""Build a Document object from a Strawberry model
:param model: the Strawberry data model object that represents a Document
:return: a Document object
"""
from docarray import Document
fields = {}
_field_chunks, _field_matches = None, None
if model.chunks:
_field_chunks = [Document.from_strawberry_type(d) for d in model.chunks]
if model.matches:
_field_matches = [Document.from_strawberry_type(d) for d in model.matches]
for field in dataclasses.fields(model):
f_name = field.name
value = getattr(model, f_name)
if value is None:
continue
if f_name == 'scores' or f_name == 'evaluations':
from docarray.score import NamedScore
from docarray.document.strawberry_type import _NameScoreItem
value: List[_NameScoreItem]
fields[f_name] = defaultdict(NamedScore)
for v in value:
fields[f_name][v.name] = NamedScore(**dataclasses.asdict(v.score))
else:
fields[f_name] = value
d = Document(**fields)
if _field_chunks:
d.chunks = _field_chunks
if _field_matches:
d.matches = _field_matches
return d
|
import dataclasses
from collections import defaultdict
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.typing import T
from docarray.document.strawberry_type import StrawberryDocument
class StrawberryMixin:
"""Provide helper functions to convert to/from a Strawberry model"""
def to_strawberry_type(self) -> 'StrawberryDocument':
"""Convert a Document object into a Strawberry type."""
from docarray.document.strawberry_type import StrawberryDocument as SD
from docarray.document.strawberry_type import _NameScoreItem, _NamedScore
_p_dict = {}
for f in self.non_empty_fields:
v = getattr(self, f)
if f in ('matches', 'chunks'):
_p_dict[f] = v.to_strawberry_type()
elif f in ('scores', 'evaluations'):
_p_dict[f] = [
_NameScoreItem(name=k, score=_NamedScore(**v.to_dict()))
for k, v in v.items()
]
else:
_p_dict[f] = v
return SD(**_p_dict)
@classmethod
def from_strawberry_type(cls: Type['T'], model) -> 'T':
"""Build a Document object from a Strawberry model
:param model: the Strawberry data model object that represents a Document
:return: a Document object
"""
from docarray import Document
fields = {}
_field_chunks, _field_matches = None, None
if model.chunks:
_field_chunks = [Document.from_strawberry_type(d) for d in model.chunks]
if model.matches:
_field_matches = [Document.from_strawberry_type(d) for d in model.matches]
for field in dataclasses.fields(model):
f_name = field.name
value = getattr(model, f_name)
if value is None:
continue
if f_name == 'scores' or f_name == 'evaluations':
from docarray.score import NamedScore
from docarray.document.strawberry_type import _NameScoreItem
value: List[_NameScoreItem]
fields[f_name] = defaultdict(NamedScore)
for v in value:
fields[f_name][v.name] = NamedScore(**dataclasses.asdict(v.score))
else:
fields[f_name] = value
d = Document(**fields)
if _field_chunks:
d.chunks = _field_chunks
if _field_matches:
d.matches = _field_matches
return d
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import io
from typing import List
import fitz
import numpy as np
import pdfplumber
from jina import Document, DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
class PDFSegmenter(Executor):
"""
    :class:`PDFSegmenter` extracts data (text and images) from PDF files.
Stores images (`mime_type`=image/*) on chunk level ('c') and text segments (`mime_type`=text/plain)
on chunk level ('c') in the root ('r') Document.
"""
def __init__(
self,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.logger = JinaLogger(context=self.__class__.__name__)
@requests
def craft(self, docs: DocumentArray, **kwargs):
"""
Read PDF files. Extracts data from them.
        Checks whether the input is a filename string
        or the file contents in bytes.
It will then extract the data from the file, creating a list for images,
and text.
:param docs: Array of Documents.
"""
for doc in docs:
pdf_img, pdf_text = self._parse_pdf(doc)
if pdf_img is not None:
images = self._extract_image(pdf_img)
doc.chunks.extend(
[Document(blob=img, mime_type='image/*') for img in images]
)
if pdf_text is not None:
texts = self._extract_text(pdf_text)
doc.chunks.extend(
[Document(text=t, mime_type='text/plain') for t in texts]
)
def _parse_pdf(self, doc: Document):
pdf_img = None
pdf_text = None
try:
if doc.uri:
pdf_img = fitz.open(doc.uri)
pdf_text = pdfplumber.open(doc.uri)
if doc.buffer:
pdf_img = fitz.open(stream=doc.buffer, filetype='pdf')
pdf_text = pdfplumber.open(io.BytesIO(doc.buffer))
except Exception as ex:
self.logger.error(f'Failed to open due to: {ex}')
return pdf_img, pdf_text
def _extract_text(self, pdf_text) -> List[str]:
# Extract text
with pdf_text:
texts = []
count = len(pdf_text.pages)
for i in range(count):
page = pdf_text.pages[i]
texts.append(page.extract_text(x_tolerance=1, y_tolerance=1))
return texts
def _extract_image(self, pdf_img) -> List['np.ndarray']:
with pdf_img:
images = []
for page in range(len(pdf_img)):
for img in pdf_img.getPageImageList(page):
xref = img[0]
pix = fitz.Pixmap(pdf_img, xref)
# read data from buffer and reshape the array into 3-d format
np_arr = (
np.frombuffer(pix.samples, dtype=np.uint8)
.reshape(pix.h, pix.w, pix.n)
.astype('float32')
)
if pix.n - pix.alpha < 4: # if gray or RGB
if pix.n == 1: # convert gray to rgb
images.append(np.concatenate((np_arr,) * 3, -1))
elif pix.n == 4: # remove transparency layer
images.append(np_arr[..., :3])
else:
images.append(np_arr)
else: # if CMYK:
pix = fitz.Pixmap(fitz.csRGB, pix) # Convert to RGB
np_arr = (
np.frombuffer(pix.samples, dtype=np.uint8)
.reshape(pix.h, pix.w, pix.n)
.astype('float32')
)
images.append(np_arr)
return images
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import io
from typing import List
import fitz
import numpy as np
import pdfplumber
from jina import Document, DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
class PDFSegmenter(Executor):
"""
    :class:`PDFSegmenter` extracts data (text and images) from PDF files.
Stores images (`mime_type`=image/*) on chunk level ('c') and text segments (`mime_type`=text/plain)
on chunk level ('c') in the root ('r') Document.
"""
def __init__(
self,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.logger = JinaLogger(context=self.__class__.__name__)
@requests
def craft(self, docs: DocumentArray, **kwargs):
"""
Read PDF files. Extracts data from them.
        Checks whether the input is a filename string
        or the file contents in bytes.
It will then extract the data from the file, creating a list for images,
and text.
:param docs: Array of Documents.
"""
for doc in docs:
pdf_img, pdf_text = self._parse_pdf(doc)
if pdf_img is not None:
images = self._extract_image(pdf_img)
doc.chunks.extend(
[Document(blob=img, mime_type='image/*') for img in images]
)
if pdf_text is not None:
texts = self._extract_text(pdf_text)
doc.chunks.extend(
[Document(text=t, mime_type='text/plain') for t in texts]
)
def _parse_pdf(self, doc: Document):
pdf_img = None
pdf_text = None
try:
if doc.uri:
pdf_img = fitz.open(doc.uri)
pdf_text = pdfplumber.open(doc.uri)
if doc.buffer:
pdf_img = fitz.open(stream=doc.buffer, filetype='pdf')
pdf_text = pdfplumber.open(io.BytesIO(doc.buffer))
except Exception as ex:
self.logger.error(f'Failed to open due to: {ex}')
return pdf_img, pdf_text
def _extract_text(self, pdf_text) -> List[str]:
# Extract text
with pdf_text:
texts = []
count = len(pdf_text.pages)
for i in range(count):
page = pdf_text.pages[i]
texts.append(page.extract_text(x_tolerance=1, y_tolerance=1))
return texts
def _extract_image(self, pdf_img) -> List['np.ndarray']:
with pdf_img:
images = []
for page in range(len(pdf_img)):
for img in pdf_img.getPageImageList(page):
xref = img[0]
pix = fitz.Pixmap(pdf_img, xref)
# read data from buffer and reshape the array into 3-d format
np_arr = (
np.frombuffer(pix.samples, dtype=np.uint8)
.reshape(pix.h, pix.w, pix.n)
.astype('float32')
)
if pix.n - pix.alpha < 4: # if gray or RGB
if pix.n == 1: # convert gray to rgb
images.append(np.concatenate((np_arr,) * 3, -1))
elif pix.n == 4: # remove transparency layer
images.append(np_arr[..., :3])
else:
images.append(np_arr)
else: # if CMYK:
pix = fitz.Pixmap(fitz.csRGB, pix) # Convert to RGB
np_arr = (
np.frombuffer(pix.samples, dtype=np.uint8)
.reshape(pix.h, pix.w, pix.n)
.astype('float32')
)
images.append(np_arr)
return images
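# Usage sketch: a minimal, hypothetical example of running the segmenter inside
# a Jina Flow. 'toy.pdf' stands in for any local PDF reachable via `doc.uri`;
# passing the raw bytes via `doc.buffer` works the same way.
if __name__ == '__main__':
    from jina import Flow

    f = Flow().add(uses=PDFSegmenter)
    with f:
        docs = f.post(on='/index', inputs=[Document(uri='toy.pdf')])
        for doc in docs:
            print(f'extracted {len(doc.chunks)} chunks from {doc.uri}')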
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
import unittest
import torch
from mmengine.data import PixelData
from mmengine.testing import assert_allclose
from mmdet.models.seg_heads import PanopticFPNHead
from mmdet.structures import DetDataSample
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=1,
inner_channels=1)
head.init_weights()
assert_allclose(head.conv_logits.bias.data,
torch.zeros_like(head.conv_logits.bias.data))
def test_loss(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
data_sample1 = DetDataSample()
data_sample1.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
data_sample2 = DetDataSample()
data_sample2.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
batch_data_samples = [data_sample1, data_sample2]
results = head.loss(x, batch_data_samples)
self.assertIsInstance(results, dict)
def test_predict(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
img_meta1 = {
'batch_input_shape': (16, 16),
'img_shape': (14, 14),
'ori_shape': (12, 12),
}
img_meta2 = {
'batch_input_shape': (16, 16),
'img_shape': (16, 16),
'ori_shape': (16, 16),
}
batch_img_metas = [img_meta1, img_meta2]
head.eval()
with torch.no_grad():
seg_preds = head.predict(x, batch_img_metas, rescale=False)
self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
seg_preds = head.predict(x, batch_img_metas, rescale=True)
self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
|
import unittest
import torch
from mmengine.data import PixelData
from mmengine.testing import assert_allclose
from mmdet.data_elements import DetDataSample
from mmdet.models.seg_heads import PanopticFPNHead
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=1,
inner_channels=1)
head.init_weights()
assert_allclose(head.conv_logits.bias.data,
torch.zeros_like(head.conv_logits.bias.data))
def test_loss(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
data_sample1 = DetDataSample()
data_sample1.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
data_sample2 = DetDataSample()
data_sample2.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
batch_data_samples = [data_sample1, data_sample2]
results = head.loss(x, batch_data_samples)
self.assertIsInstance(results, dict)
def test_predict(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
img_meta1 = {
'batch_input_shape': (16, 16),
'img_shape': (14, 14),
'ori_shape': (12, 12),
}
img_meta2 = {
'batch_input_shape': (16, 16),
'img_shape': (16, 16),
'ori_shape': (16, 16),
}
batch_img_metas = [img_meta1, img_meta2]
head.eval()
with torch.no_grad():
seg_preds = head.predict(x, batch_img_metas, rescale=False)
self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
seg_preds = head.predict(x, batch_img_metas, rescale=True)
self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
|
import numpy as np
from sklearn.datasets import make_classification
import xgboost as xgb
from xgboost.testing.updater import get_basescore
def test_exp_family() -> None:
X, y = make_classification(n_samples=128, n_classes=2, weights=[0.8, 0.2])
clf = xgb.train(
{"objective": "binary:logistic"}, xgb.QuantileDMatrix(X, y), num_boost_round=1
)
reg = xgb.train(
{"objective": "reg:logistic"}, xgb.QuantileDMatrix(X, y), num_boost_round=1
)
clf1 = xgb.train(
{"objective": "binary:logitraw"}, xgb.QuantileDMatrix(X, y), num_boost_round=1
)
# The base score stored in the booster model is un-transformed
np.testing.assert_allclose([get_basescore(m) for m in (reg, clf, clf1)], y.mean())
X, y = make_classification(weights=[0.8, 0.2], random_state=2025)
clf = xgb.train(
{"objective": "binary:logistic", "scale_pos_weight": 4.0},
xgb.QuantileDMatrix(X, y),
num_boost_round=1,
)
score = get_basescore(clf)
np.testing.assert_allclose(score, 0.5, rtol=1e-3)
|
import numpy as np
from sklearn.datasets import make_classification
import xgboost as xgb
from xgboost.testing.updater import get_basescore
def test_exp_family() -> None:
X, y = make_classification(n_samples=128, n_classes=2, weights=[0.8, 0.2])
clf = xgb.train(
{"objective": "binary:logistic"}, xgb.QuantileDMatrix(X, y), num_boost_round=1
)
reg = xgb.train(
{"objective": "reg:logistic"}, xgb.QuantileDMatrix(X, y), num_boost_round=1
)
clf1 = xgb.train(
{"objective": "binary:logitraw"}, xgb.QuantileDMatrix(X, y), num_boost_round=1
)
# The base score stored in the booster model is un-transformed
np.testing.assert_allclose([get_basescore(m) for m in (reg, clf, clf1)], y.mean())
|
import hashlib
import logging
from os import PathLike
from pathlib import Path
from typing import Union
import torch
from torchaudio._internal import download_url_to_file
_LG = logging.getLogger(__name__)
def _get_local_path(key):
path = Path(torch.hub.get_dir()) / "torchaudio" / Path(key)
path.parent.mkdir(parents=True, exist_ok=True)
return path
def _download(key, path, progress):
url = f"https://download.pytorch.org/torchaudio/{key}"
download_url_to_file(url, path, progress=progress)
def _get_hash(path, hash, chunk_size=1028):
m = hashlib.sha256()
with open(path, "rb") as file:
data = file.read(chunk_size)
while data:
m.update(data)
data = file.read(chunk_size)
return m.hexdigest()
def download_asset(
key: str,
hash: str = "",
path: Union[str, PathLike] = "",
*,
progress: bool = True,
) -> str:
"""Download and store torchaudio assets to local file system.
If a file exists at the download path, then that path is returned with or without
hash validation.
Args:
key (str): The asset identifier.
hash (str, optional):
The value of SHA256 hash of the asset. If provided, it is used to verify
the downloaded / cached object. If not provided, then no hash validation
is performed. This means if a file exists at the download path, then the path
is returned as-is without verifying the identity of the file.
path (path-like object, optional):
By default, the downloaded asset is saved in a directory under
:py:func:`torch.hub.get_dir` and intermediate directories based on the given `key`
are created.
This argument can be used to overwrite the target location.
When this argument is provided, all the intermediate directories have to be
created beforehand.
progress (bool): Whether to show progress bar for downloading. Default: ``True``.
Note:
Currently the valid key values are the route on ``download.pytorch.org/torchaudio``,
but this is an implementation detail.
Returns:
str: The path to the asset on the local file system.
"""
path = path or _get_local_path(key)
if path.exists():
_LG.info("The local file (%s) exists. Skipping the download.", path)
else:
_LG.info("Downloading %s to %s", key, path)
_download(key, path, progress=progress)
if hash:
_LG.info("Verifying the hash value.")
digest = _get_hash(path, hash)
if digest != hash:
raise ValueError(
f"The hash value of the downloaded file ({path}), '{digest}' does not match "
f"the provided hash value, '{hash}'."
)
_LG.info("Hash validated.")
return str(path)
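# Usage sketch: fetching a bundled asset by key. The key below is a hypothetical
# example; valid keys are routes on download.pytorch.org/torchaudio. Passing
# `hash` additionally verifies the SHA256 digest of the cached or downloaded file.
if __name__ == "__main__":
    local_path = download_asset("tutorial-assets/steam-train-whistle-daniel_simon.wav")
    print(local_path)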
|
import hashlib
import logging
from os import PathLike
from pathlib import Path
from typing import Union
import torch
_LG = logging.getLogger(__name__)
def _get_local_path(key):
path = Path(torch.hub.get_dir()) / "torchaudio" / Path(key)
path.parent.mkdir(parents=True, exist_ok=True)
return path
def _download(key, path, progress):
url = f"https://download.pytorch.org/torchaudio/{key}"
torch.hub.download_url_to_file(url, path, progress=progress)
def _get_hash(path, hash, chunk_size=1028):
m = hashlib.sha256()
with open(path, "rb") as file:
data = file.read(chunk_size)
while data:
m.update(data)
data = file.read(chunk_size)
return m.hexdigest()
def download_asset(
key: str,
hash: str = "",
path: Union[str, PathLike] = "",
*,
progress: bool = True,
) -> str:
"""Download and store torchaudio assets to local file system.
If a file exists at the download path, then that path is returned with or without
hash validation.
Args:
key (str): The asset identifier.
hash (str, optional):
The value of SHA256 hash of the asset. If provided, it is used to verify
the downloaded / cached object. If not provided, then no hash validation
is performed. This means if a file exists at the download path, then the path
is returned as-is without verifying the identity of the file.
path (path-like object, optional):
By default, the downloaded asset is saved in a directory under
:py:func:`torch.hub.get_dir` and intermediate directories based on the given `key`
are created.
This argument can be used to overwrite the target location.
When this argument is provided, all the intermediate directories have to be
created beforehand.
progress (bool): Whether to show progress bar for downloading. Default: ``True``.
Note:
Currently the valid key values are the route on ``download.pytorch.org/torchaudio``,
but this is an implementation detail.
Returns:
str: The path to the asset on the local file system.
"""
path = path or _get_local_path(key)
if path.exists():
_LG.info("The local file (%s) exists. Skipping the download.", path)
else:
_LG.info("Downloading %s to %s", key, path)
_download(key, path, progress=progress)
if hash:
_LG.info("Verifying the hash value.")
digest = _get_hash(path, hash)
if digest != hash:
raise ValueError(
f"The hash value of the downloaded file ({path}), '{digest}' does not match "
f"the provided hash value, '{hash}'."
)
_LG.info("Hash validated.")
return str(path)
|
_base_ = '../rpn/rpn_r50_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../rpn/rpn_r50_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
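# Usage sketch: evaluating a SoftmaxLoss classification head on a tiny labeled
# dataset. The model name, sentence pairs, and labels below are hypothetical;
# in practice the same SoftmaxLoss instance used for training is passed as
# `softmax_model`.
if __name__ == "__main__":
    from sentence_transformers import InputExample, SentenceTransformer, losses

    model = SentenceTransformer("all-MiniLM-L6-v2")
    examples = [
        InputExample(texts=["A man is eating food.", "A man eats something."], label=1),
        InputExample(texts=["A man is eating food.", "A plane is taking off."], label=0),
    ]
    dataloader = DataLoader(examples, shuffle=False, batch_size=2)
    softmax_loss = losses.SoftmaxLoss(
        model,
        sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
        num_labels=2,
    )
    evaluator = LabelAccuracyEvaluator(dataloader, name="toy", softmax_model=softmax_loss)
    print(evaluator(model))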
|
import csv
import logging
import os
from typing import TYPE_CHECKING, Dict
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
|
import torch
import torch.nn.functional as F
from ..utils import _log_api_usage_once
def sigmoid_focal_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = 0.25,
gamma: float = 2,
reduction: str = "none",
) -> torch.Tensor:
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs (Tensor): A float tensor of arbitrary shape.
The predictions for each example.
targets (Tensor): A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha (float): Weighting factor in range [0, 1] to balance
positive vs negative examples or -1 for ignore. Default: ``0.25``.
gamma (float): Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples. Default: ``2``.
reduction (string): ``'none'`` | ``'mean'`` | ``'sum'``
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``.
Returns:
Loss tensor with the reduction option applied.
"""
# Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py
    if not (0 <= alpha <= 1) and alpha != -1:
raise ValueError(f"Invalid alpha value: {alpha}. alpha must be in the range [0,1] or -1 for ignore.")
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(sigmoid_focal_loss)
p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
# Check reduction option and return loss accordingly
if reduction == "none":
pass
elif reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
else:
raise ValueError(
f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'"
)
return loss
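# Usage sketch: a quick illustration of the `reduction` argument with arbitrary
# shapes (per-element loss by default, a scalar with "mean" or "sum"):
#
#     inputs = torch.randn(8, 4)                     # raw logits
#     targets = torch.randint(0, 2, (8, 4)).float()  # binary labels in {0, 1}
#     sigmoid_focal_loss(inputs, targets)                    # per-element, shape (8, 4)
#     sigmoid_focal_loss(inputs, targets, reduction="mean")  # scalar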
|
import torch
import torch.nn.functional as F
from ..utils import _log_api_usage_once
def sigmoid_focal_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = 0.25,
gamma: float = 2,
reduction: str = "none",
) -> torch.Tensor:
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs (Tensor): A float tensor of arbitrary shape.
The predictions for each example.
targets (Tensor): A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha (float): Weighting factor in range (0,1) to balance
positive vs negative examples or -1 for ignore. Default: ``0.25``.
gamma (float): Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples. Default: ``2``.
reduction (string): ``'none'`` | ``'mean'`` | ``'sum'``
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``.
Returns:
Loss tensor with the reduction option applied.
"""
# Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(sigmoid_focal_loss)
p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
# Check reduction option and return loss accordingly
if reduction == "none":
pass
elif reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
else:
raise ValueError(
f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'"
)
return loss
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("sparse-embedding/splade-distilbert-base-uncased-init")
guide = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SparseCachedGISTEmbedLoss(
model,
guide,
mini_batch_size=64,
    margin_strategy="relative",  # or "absolute" (e.g., relative margin=0.05 allows max. 95% of positive similarity)
margin=0.1,
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
# TODO: Investigate if it's working with a test, seems that the problem is hparam and not the cache part
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseCachedGISTEmbedLoss,
SparseEncoder,
SparseEncoderTrainer,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
# Initialize the sparse loss with a guide model
guide = SparseEncoder(
modules=[
MLMTransformer("prithivida/Splade_PP_en_v1"),
SpladePooling(pooling_strategy="max"),
],
device="cuda:0",
)
loss = SparseCachedGISTEmbedLoss(model, guide=guide)
# Create the trainer
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
# Train the model
trainer.train()
|
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
import mmcv
import torch
from mmcv.runner import load_checkpoint
from .. import build_detector
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (str | dict): Config file path
or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_config,
teacher_ckpt=None,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = mmcv.Config.fromfile(teacher_config)
self.teacher_model = build_detector(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item is the ground-truth boxes for
                one image, in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(img)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
def cuda(self, device=None):
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling cuda function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def train(self, mode=True):
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name, value):
"""Set attribute, i.e. self.name = value
        This reloading prevents the teacher model from being registered as an
        ``nn.Module``. The teacher module is registered as a plain object, so that
        the teacher parameters will not show up when calling the
        ``self.parameters``, ``self.modules``, or ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
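# Configuration sketch: how this detector is typically selected in an mmdet
# config file (e.g. the LD configs). The paths below are hypothetical
# placeholders, and the student's `bbox_head` must be a distillation-aware head
# (such as LDHead) whose forward_train consumes the teacher outputs:
#
#     model = dict(
#         type='KnowledgeDistillationSingleStageDetector',
#         teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py',
#         teacher_ckpt='checkpoints/gfl_r101_teacher.pth',
#         backbone=dict(...),   # student backbone, e.g. a ResNet-18
#         neck=dict(...),
#         bbox_head=dict(type='LDHead', ...))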
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.runner import load_checkpoint
from .. import build_detector
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (str | dict): Config file path
or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_config,
teacher_ckpt=None,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, str):
teacher_config = mmcv.Config.fromfile(teacher_config)
self.teacher_model = build_detector(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item is the ground-truth boxes for
                one image, in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(img)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
def cuda(self, device=None):
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling cuda function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def train(self, mode=True):
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name, value):
"""Set attribute, i.e. self.name = value
        This reloading prevents the teacher model from being registered as an
        ``nn.Module``. The teacher module is registered as a plain object, so that
        the teacher parameters will not show up when calling the
        ``self.parameters``, ``self.modules``, or ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = getattr(T, cls)(mode=mode).to(device=self.device, dtype=self.dtype)
output = convolve(x, y)
ts_output = torch_script(convolve)(x, y)
self.assertEqual(ts_output, output)
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.Speed(1000, 0.9).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
@nested_params([True, False])
def test_AddNoise(self, use_lengths):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
if use_lengths:
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
else:
lengths = None
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, self.dtype)
output = add_noise(waveform, noise, snr, lengths)
ts_output = torch_script(add_noise)(waveform, noise, snr, lengths)
self.assertEqual(ts_output, output)
def test_Preemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=self.dtype, device=self.device)
preemphasis = T.Preemphasis(coeff=0.97).to(dtype=self.dtype, device=self.device)
output = preemphasis(waveform)
ts_output = torch_script(preemphasis)(waveform)
self.assertEqual(ts_output, output)
def test_Deemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=self.dtype, device=self.device)
deemphasis = T.Deemphasis(coeff=0.97).to(dtype=self.dtype, device=self.device)
output = deemphasis(waveform)
ts_output = torch_script(deemphasis)(waveform)
self.assertEqual(ts_output, output)
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = getattr(T, cls)(mode=mode).to(device=self.device, dtype=self.dtype)
output = convolve(x, y)
ts_output = torch_script(convolve)(x, y)
self.assertEqual(ts_output, output)
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.Speed(1000, 0.9).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
def test_AddNoise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, self.dtype)
output = add_noise(waveform, noise, lengths, snr)
ts_output = torch_script(add_noise)(waveform, noise, lengths, snr)
self.assertEqual(ts_output, output)
def test_Preemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=self.dtype, device=self.device)
preemphasis = T.Preemphasis(coeff=0.97).to(dtype=self.dtype, device=self.device)
output = preemphasis(waveform)
ts_output = torch_script(preemphasis)(waveform)
self.assertEqual(ts_output, output)
def test_Deemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=self.dtype, device=self.device)
deemphasis = T.Deemphasis(coeff=0.97).to(dtype=self.dtype, device=self.device)
output = deemphasis(waveform)
ts_output = torch_script(deemphasis)(waveform)
self.assertEqual(ts_output, output)
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 480), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
"""Test Bedrock multi-modal LLM."""
import json
from io import BytesIO
import pytest
from unittest.mock import patch, AsyncMock
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.bedrock import BedrockMultiModal
from llama_index.core.schema import ImageDocument
def test_class_name():
"""Test class name."""
llm = BedrockMultiModal()
assert llm.class_name() == "bedrock_multi_modal_llm"
def test_init():
"""Test initialization."""
llm = BedrockMultiModal(max_tokens=400)
assert llm.max_tokens == 400
assert llm.model == "anthropic.claude-3-sonnet-20240229-v1:0"
def test_inheritance():
"""Test inheritance."""
assert issubclass(BedrockMultiModal, MultiModalLLM)
def test_model_validation():
"""Test model validation."""
with pytest.raises(ValueError, match="Invalid model"):
BedrockMultiModal(model="invalid-model")
@patch("boto3.Session")
def test_completion(mock_session):
"""Test completion."""
# Mock the invoke_model response
mock_client = mock_session.return_value.client.return_value
mock_response = {
"body": BytesIO(json.dumps({"content": [{"text": "test response"}]}).encode()),
"ResponseMetadata": {
"HTTPHeaders": {
"x-amzn-bedrock-input-token-count": "100",
"x-amzn-bedrock-output-token-count": "50",
}
},
}
mock_client.invoke_model.return_value = mock_response
llm = BedrockMultiModal()
image_doc = ImageDocument(image="base64_encoded_string")
response = llm.complete(prompt="test prompt", image_documents=[image_doc])
assert response.text == "test response"
assert response.additional_kwargs["input_tokens"] == "100"
assert response.additional_kwargs["output_tokens"] == "50"
# Verify the call was made with correct parameters
mock_client.invoke_model.assert_called_once()
@pytest.mark.asyncio
@patch("aioboto3.Session")
async def test_async_completion(mock_session):
"""Test async completion."""
# Mock the async client
mock_client = mock_session.return_value.client.return_value.__aenter__.return_value
mock_body = AsyncMock()
mock_body.read.return_value = json.dumps(
{"content": [{"text": "async test response"}]}
).encode()
mock_response = {
"body": mock_body,
"ResponseMetadata": {
"HTTPHeaders": {
"x-amzn-bedrock-input-token-count": "100",
"x-amzn-bedrock-output-token-count": "50",
}
},
}
mock_client.invoke_model.return_value = mock_response
llm = BedrockMultiModal()
image_doc = ImageDocument(image="base64_encoded_string")
response = await llm.acomplete(prompt="test prompt", image_documents=[image_doc])
assert response.text == "async test response"
assert response.additional_kwargs["input_tokens"] == "100"
assert response.additional_kwargs["output_tokens"] == "50"
mock_body.read.assert_awaited_once()
|
"""Test Bedrock multi-modal LLM."""
import json
from io import BytesIO
import pytest
from unittest.mock import patch, AsyncMock
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.bedrock import BedrockMultiModal
from llama_index.core.schema import ImageDocument
def test_class_name():
"""Test class name."""
llm = BedrockMultiModal()
assert llm.class_name() == "bedrock_multi_modal_llm"
def test_init():
"""Test initialization."""
llm = BedrockMultiModal(max_tokens=400)
assert llm.max_tokens == 400
assert llm.model == "anthropic.claude-3-sonnet-20240229-v1:0"
def test_inheritance():
"""Test inheritance."""
assert issubclass(BedrockMultiModal, MultiModalLLM)
def test_model_validation():
"""Test model validation."""
with pytest.raises(ValueError, match="Invalid model"):
BedrockMultiModal(model="invalid-model")
@patch("boto3.Session")
def test_completion(mock_session):
"""Test completion."""
# Mock the invoke_model response
mock_client = mock_session.return_value.client.return_value
mock_response = {
"body": BytesIO(json.dumps({"content": [{"text": "test response"}]}).encode()),
"ResponseMetadata": {
"HTTPHeaders": {
"x-amzn-bedrock-input-token-count": "100",
"x-amzn-bedrock-output-token-count": "50",
}
},
}
mock_client.invoke_model.return_value = mock_response
llm = BedrockMultiModal()
image_doc = ImageDocument(image="base64_encoded_string")
response = llm.complete(prompt="test prompt", image_documents=[image_doc])
assert response.text == "test response"
assert response.additional_kwargs["input_tokens"] == "100"
assert response.additional_kwargs["output_tokens"] == "50"
# Verify the call was made with correct parameters
mock_client.invoke_model.assert_called_once()
@pytest.mark.asyncio
@patch("aioboto3.Session")
async def test_async_completion(mock_session):
"""Test async completion."""
# Mock the async client
mock_client = mock_session.return_value.client.return_value.__aenter__.return_value
mock_body = AsyncMock()
mock_body.read.return_value = json.dumps(
{"content": [{"text": "async test response"}]}
).encode()
mock_response = {
"body": mock_body,
"ResponseMetadata": {
"HTTPHeaders": {
"x-amzn-bedrock-input-token-count": "100",
"x-amzn-bedrock-output-token-count": "50",
}
},
}
mock_client.invoke_model.return_value = mock_response
llm = BedrockMultiModal()
image_doc = ImageDocument(image="base64_encoded_string")
response = await llm.acomplete(prompt="test prompt", image_documents=[image_doc])
assert response.text == "async test response"
assert response.additional_kwargs["input_tokens"] == "100"
assert response.additional_kwargs["output_tokens"] == "50"
mock_body.read.assert_awaited_once()
|
import os
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class DirectoryListingInput(BaseModel):
"""Input for ListDirectoryTool."""
dir_path: str = Field(default=".", description="Subdirectory to list.")
class ListDirectoryTool(BaseFileToolMixin, BaseTool):
"""Tool that lists files and directories in a specified folder."""
name: str = "list_directory"
args_schema: Type[BaseModel] = DirectoryListingInput
description: str = "List files and directories in a specified folder"
def _run(
self,
dir_path: str = ".",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
try:
entries = os.listdir(dir_path_)
if entries:
return "\n".join(entries)
else:
return f"No files found in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
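# Usage sketch: listing a directory relative to a sandboxed root. The root
# directory below is a hypothetical example; if `root_dir` is omitted, relative
# paths are resolved against the current working directory.
if __name__ == "__main__":
    tool = ListDirectoryTool(root_dir="/tmp")
    print(tool.run({"dir_path": "."}))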
|
import os
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class DirectoryListingInput(BaseModel):
"""Input for ListDirectoryTool."""
dir_path: str = Field(default=".", description="Subdirectory to list.")
class ListDirectoryTool(BaseFileToolMixin, BaseTool): # type: ignore[override, override]
"""Tool that lists files and directories in a specified folder."""
name: str = "list_directory"
args_schema: Type[BaseModel] = DirectoryListingInput
description: str = "List files and directories in a specified folder"
def _run(
self,
dir_path: str = ".",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
try:
entries = os.listdir(dir_path_)
if entries:
return "\n".join(entries)
else:
return f"No files found in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan 256;
    the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan 256;
    the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
import numpy as np
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.typing import Tensor
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor._flush_tensor_to_proto(nd_proto, value=original_tensor)
nested_item = NodeProto(tensor=nd_proto)
tensor = Tensor.from_protobuf(nested_item.tensor)
assert (tensor == original_tensor).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor._flush_tensor_to_proto(nd_proto, value=original_tensor)
nested_item2 = NodeProto(tensor=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
import numpy as np
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.typing import Tensor
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor._flush_tensor_to_proto(nd_proto, value=original_tensor)
nested_item = NodeProto(tensor=nd_proto)
tensor = Tensor._read_from_proto(nested_item.tensor)
assert (tensor == original_tensor).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor._flush_tensor_to_proto(nd_proto, value=original_tensor)
nested_item2 = NodeProto(tensor=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple, Union
from torch import Tensor
from mmdet.registry import MODELS
from .convfc_bbox_head import ConvFCBBoxHead
@MODELS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
    This inherits ``ConvFCBBoxHead`` with a modified forward() function,
    allowing us to get the intermediate shared feature.
"""
def _forward_shared(self, x: Tensor) -> Tensor:
"""Forward function for shared part.
Args:
x (Tensor): Input feature.
Returns:
Tensor: Shared feature.
"""
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
return x
def _forward_cls_reg(self, x: Tensor) -> Tuple[Tensor]:
"""Forward function for classification and regression parts.
Args:
x (Tensor): Input feature.
Returns:
tuple[Tensor]:
- cls_score (Tensor): classification prediction.
- bbox_pred (Tensor): bbox prediction.
"""
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
def forward(
self,
x: Tensor,
return_shared_feat: bool = False) -> Union[Tensor, Tuple[Tensor]]:
"""Forward function.
Args:
x (Tensor): input features
return_shared_feat (bool): If True, return cls-reg-shared feature.
Return:
out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``,
if ``return_shared_feat`` is True, append ``x_shared`` to the
returned tuple.
"""
x_shared = self._forward_shared(x)
out = self._forward_cls_reg(x_shared)
if return_shared_feat:
out += (x_shared, )
return out
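# --- Hedged usage sketch (not part of the original file) ---
# Illustrates how ``return_shared_feat`` changes the tuple returned by
# ``SCNetBBoxHead.forward``. The head configuration and RoI feature shape
# below are assumptions chosen only for illustration; building the head
# relies on mmdet's registries resolving the default bbox coder and losses,
# so treat this as a sketch rather than a verified snippet.
if __name__ == '__main__':
    import torch
    head = SCNetBBoxHead(
        num_shared_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=80)
    roi_feats = torch.rand(4, 256, 7, 7)  # 4 pooled RoI features
    cls_score, bbox_pred = head(roi_feats)
    # with return_shared_feat=True, the shared FC feature is appended
    cls_score, bbox_pred, x_shared = head(roi_feats, return_shared_feat=True)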
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .convfc_bbox_head import ConvFCBBoxHead
@MODELS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
    This inherits ``ConvFCBBoxHead`` with a modified forward() function,
    allowing us to get the intermediate shared feature.
"""
def _forward_shared(self, x):
"""Forward function for shared part."""
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
return x
def _forward_cls_reg(self, x):
"""Forward function for classification and regression parts."""
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
def forward(self, x, return_shared_feat=False):
"""Forward function.
Args:
x (Tensor): input features
return_shared_feat (bool): If True, return cls-reg-shared feature.
Return:
out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``,
if ``return_shared_feat`` is True, append ``x_shared`` to the
returned tuple.
"""
x_shared = self._forward_shared(x)
out = self._forward_cls_reg(x_shared)
if return_shared_feat:
out += (x_shared, )
return out
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function will finish the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.message_hub.update_scalar('train/grad_norm',
float(grad_norm))
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
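# --- Hedged usage sketch (not part of the original file) ---
# Shows how the ``grad_clip`` dict is forwarded as keyword arguments to
# ``torch.nn.utils.clip_grad.clip_grad_norm_`` by ``OptimizerHook.clip_grads``.
# The toy model, input and clip settings below are assumptions for illustration.
if __name__ == '__main__':
    import torch.nn as nn
    model = nn.Linear(4, 2)
    loss = model(torch.randn(8, 4)).sum()
    loss.backward()
    hook = OptimizerHook(grad_clip=dict(max_norm=35, norm_type=2))
    grad_norm = hook.clip_grads(list(model.parameters()))
    # total gradient norm; gradients are clipped in place if it exceeds max_norm
    print(float(grad_norm))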
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function will finish the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, objectnesses = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_pos_loss = empty_gt_losses['loss_pos']
empty_neg_loss = empty_gt_losses['loss_neg']
empty_center_loss = empty_gt_losses['loss_center']
assert empty_neg_loss.item() > 0, 'cls loss should be non-zero'
assert empty_pos_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_center_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_pos_loss = one_gt_losses['loss_pos']
onegt_neg_loss = one_gt_losses['loss_neg']
onegt_center_loss = one_gt_losses['loss_center']
assert onegt_pos_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_neg_loss.item() > 0, 'box loss should be non-zero'
assert onegt_center_loss.item() > 0, 'box loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
mlvl_anchors = [torch.ones(5 * 5, 2)]
img_shape = None
scale_factor = [0.5, 0.5]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self._get_bboxes(
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=rescale)
|
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, objectnesses = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_pos_loss = empty_gt_losses['loss_pos']
empty_neg_loss = empty_gt_losses['loss_neg']
empty_center_loss = empty_gt_losses['loss_center']
assert empty_neg_loss.item() > 0, 'cls loss should be non-zero'
assert empty_pos_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_center_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_pos_loss = one_gt_losses['loss_pos']
onegt_neg_loss = one_gt_losses['loss_neg']
onegt_center_loss = one_gt_losses['loss_center']
assert onegt_pos_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_neg_loss.item() > 0, 'box loss should be non-zero'
assert onegt_center_loss.item() > 0, 'box loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
mlvl_anchors = [torch.ones(5 * 5, 2)]
img_shape = None
scale_factor = [0.5, 0.5]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self._get_bboxes(
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=rescale)
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
"""
|
#!/usr/bin/env python3
import numbers
import random
import warnings
from torchvision.transforms import RandomCrop, RandomResizedCrop
from . import _functional_video as F
__all__ = [
"RandomCropVideo",
"RandomResizedCropVideo",
"CenterCropVideo",
"NormalizeVideo",
"ToTensorVideo",
"RandomHorizontalFlipVideo",
]
warnings.warn(
"The 'torchvision.transforms._transforms_video' module is deprecated since 0.12 and will be removed in the future. "
"Please use the 'torchvision.transforms' module instead."
)
class RandomCropVideo(RandomCrop):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, OH, OW)
"""
i, j, h, w = self.get_params(clip, self.size)
return F.crop(clip, i, j, h, w)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
class RandomResizedCropVideo(RandomResizedCrop):
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation_mode="bilinear",
):
if isinstance(size, tuple):
if len(size) != 2:
raise ValueError(f"size should be tuple (height, width), instead got {size}")
self.size = size
else:
self.size = (size, size)
self.interpolation_mode = interpolation_mode
self.scale = scale
self.ratio = ratio
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, H, W)
"""
i, j, h, w = self.get_params(clip, self.scale, self.ratio)
return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
class CenterCropVideo:
def __init__(self, crop_size):
if isinstance(crop_size, numbers.Number):
self.crop_size = (int(crop_size), int(crop_size))
else:
self.crop_size = crop_size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, crop_size, crop_size)
"""
return F.center_crop(clip, self.crop_size)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(crop_size={self.crop_size})"
class NormalizeVideo:
"""
Normalize the video clip by mean subtraction and division by standard deviation
Args:
mean (3-tuple): pixel RGB mean
std (3-tuple): pixel RGB standard deviation
        inplace (boolean): whether to do the normalization in place
"""
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, clip):
"""
Args:
clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W)
"""
return F.normalize(clip, self.mean, self.std, self.inplace)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"
class ToTensorVideo:
"""
Convert tensor data type from uint8 to float, divide value by 255.0 and
permute the dimensions of clip tensor
"""
def __init__(self):
pass
def __call__(self, clip):
"""
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
return F.to_tensor(clip)
def __repr__(self) -> str:
return self.__class__.__name__
class RandomHorizontalFlipVideo:
"""
Flip the video clip along the horizontal direction with a given probability
Args:
p (float): probability of the clip being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor): Size is (C, T, H, W)
"""
if random.random() < self.p:
clip = F.hflip(clip)
return clip
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
|
#!/usr/bin/env python3
import numbers
import random
import warnings
from torchvision.transforms import RandomCrop, RandomResizedCrop
from . import _functional_video as F
__all__ = [
"RandomCropVideo",
"RandomResizedCropVideo",
"CenterCropVideo",
"NormalizeVideo",
"ToTensorVideo",
"RandomHorizontalFlipVideo",
]
warnings.warn(
"The 'torchvision.transforms._transforms_video' module is deprecated since 0.12 and will be removed in the future. "
"Please use the 'torchvision.transforms' module instead."
)
class RandomCropVideo(RandomCrop):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, OH, OW)
"""
i, j, h, w = self.get_params(clip, self.size)
return F.crop(clip, i, j, h, w)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
class RandomResizedCropVideo(RandomResizedCrop):
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation_mode="bilinear",
):
if isinstance(size, tuple):
if len(size) != 2:
raise ValueError(f"size should be tuple (height, width), instead got {size}")
self.size = size
else:
self.size = (size, size)
self.interpolation_mode = interpolation_mode
self.scale = scale
self.ratio = ratio
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, H, W)
"""
i, j, h, w = self.get_params(clip, self.scale, self.ratio)
return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
class CenterCropVideo:
def __init__(self, crop_size):
if isinstance(crop_size, numbers.Number):
self.crop_size = (int(crop_size), int(crop_size))
else:
self.crop_size = crop_size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, crop_size, crop_size)
"""
return F.center_crop(clip, self.crop_size)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(crop_size={self.crop_size})"
class NormalizeVideo:
"""
Normalize the video clip by mean subtraction and division by standard deviation
Args:
mean (3-tuple): pixel RGB mean
std (3-tuple): pixel RGB standard deviation
        inplace (boolean): whether to do the normalization in place
"""
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, clip):
"""
Args:
clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W)
"""
return F.normalize(clip, self.mean, self.std, self.inplace)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"
class ToTensorVideo:
"""
Convert tensor data type from uint8 to float, divide value by 255.0 and
permute the dimensions of clip tensor
"""
def __init__(self):
pass
def __call__(self, clip):
"""
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
return F.to_tensor(clip)
def __repr__(self) -> str:
return self.__class__.__name__
class RandomHorizontalFlipVideo:
"""
    Flip the video clip along the horizontal direction with a given probability
Args:
p (float): probability of the clip being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor): Size is (C, T, H, W)
"""
if random.random() < self.p:
clip = F.hflip(clip)
return clip
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
|
from typing import Any, Dict, Sequence
from llama_index.core.base.llms.types import ChatMessage
LLAMA_MODELS = {
"llama-2-13b-chat": 4096,
"llama-2-70b-chat": 4096,
}
MISTRAL_MODELS = {
"mistral-7b-instruct-v0-2": 32768,
"mixtral-8x7b-instruct-v0-1": 32768,
}
GEMMA_MODELS = {
"gemma-7b-it": 8192,
}
ALL_AVAILABLE_MODELS = {
**LLAMA_MODELS,
**MISTRAL_MODELS,
**GEMMA_MODELS,
}
def friendli_modelname_to_contextsize(modelname: str) -> int:
"""
Get a context size of a model from its name.
Args:
modelname (str): The name of model.
Returns:
int: Context size of the model.
"""
context_size = ALL_AVAILABLE_MODELS.get(modelname)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid Friendli model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def get_chat_request(messages: Sequence[ChatMessage]) -> Dict[str, Any]:
"""Get messages for the Friendli chat request."""
return {
"messages": [
{"role": message.role.value, "content": message.content}
for message in messages
]
}
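# --- Hedged usage sketch (not part of the original file) ---
# Looks up a context size and builds a chat request payload. The
# ``MessageRole`` import and the example message are assumptions based on
# llama_index's ChatMessage interface.
if __name__ == "__main__":
    from llama_index.core.base.llms.types import MessageRole
    print(friendli_modelname_to_contextsize("mixtral-8x7b-instruct-v0-1"))  # 32768
    payload = get_chat_request([ChatMessage(role=MessageRole.USER, content="Hello!")])
    print(payload)  # {'messages': [{'role': 'user', 'content': 'Hello!'}]}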
|
from typing import Any, Dict, Sequence
from llama_index.core.base.llms.types import ChatMessage
LLAMA_MODELS = {
"llama-2-13b-chat": 4096,
"llama-2-70b-chat": 4096,
}
MISTRAL_MODELS = {
"mistral-7b-instruct-v0-2": 32768,
"mixtral-8x7b-instruct-v0-1": 32768,
}
GEMMA_MODELS = {
"gemma-7b-it": 8192,
}
ALL_AVAILABLE_MODELS = {
**LLAMA_MODELS,
**MISTRAL_MODELS,
**GEMMA_MODELS,
}
def friendli_modelname_to_contextsize(modelname: str) -> int:
"""Get a context size of a model from its name.
Args:
modelname (str): The name of model.
Returns:
int: Context size of the model.
"""
context_size = ALL_AVAILABLE_MODELS.get(modelname)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid Friendli model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def get_chat_request(messages: Sequence[ChatMessage]) -> Dict[str, Any]:
"""Get messages for the Friendli chat request."""
return {
"messages": [
{"role": message.role.value, "content": message.content}
for message in messages
]
}
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCoSENTLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCoSENTLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> tv_tensors.Image:
"""[BETA] See :class:`~torchvision.transforms.v2.ToImage` for details."""
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(
f"Input can either be a pure Tensor, a numpy array, or a PIL image, but got {type(inpt)} instead."
)
return tv_tensors.Image(output)
to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
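# --- Hedged usage sketch (not part of the original file) ---
# Converts a NumPy HWC array and a PIL image into ``tv_tensors.Image``.
# The array shape is an assumption chosen for illustration.
if __name__ == "__main__":
    arr = np.zeros((32, 32, 3), dtype=np.uint8)      # HWC uint8 array
    img_from_np = to_image(arr)                      # -> tv_tensors.Image, CHW layout
    pil = to_pil_image(torch.zeros(3, 32, 32))       # tensor -> PIL.Image
    img_from_pil = to_image(pil)                     # PIL -> tv_tensors.Image
    print(img_from_np.shape, img_from_pil.shape)     # torch.Size([3, 32, 32]) twice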
|
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> tv_tensors.Image:
"""[BETA] See :class:`~torchvision.transforms.v2.ToImage` for details."""
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
return tv_tensors.Image(output)
to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
|
from jina.serve.runtimes.gateway.grpc.gateway import GRPCGateway
|
import os
from jina import __default_host__
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.grpc.gateway import GRPCGateway
__all__ = ['GRPCGatewayRuntime']
class GRPCGatewayRuntime(GatewayRuntime):
"""Gateway Runtime for gRPC."""
async def async_setup(self):
"""
The async method to setup.
Create the gRPC server and expose the port for communication.
"""
if not self.args.proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
if not (is_port_free(__default_host__, self.args.port)):
raise PortAlreadyUsed(f'port:{self.args.port}')
self.gateway = GRPCGateway(
name=self.name,
grpc_server_options=self.args.grpc_server_options,
port=self.args.port,
ssl_keyfile=self.args.ssl_keyfile,
ssl_certfile=self.args.ssl_certfile,
)
self.gateway.set_streamer(
args=self.args,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
runtime_name=self.name,
)
await self.gateway.setup_server()
async def async_teardown(self):
"""Close the connection pool"""
# usually async_cancel should already have been called, but then its a noop
# if the runtime is stopped without a sigterm (e.g. as a context manager, this can happen)
await self.gateway.teardown()
await self.async_cancel()
async def async_cancel(self):
"""The async method to stop server."""
await self.gateway.stop_server()
async def async_run_forever(self):
"""The async running of server."""
await self.gateway.run_server()
|
from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
class AzureCogsTextAnalyticsHealthTool(BaseTool):
"""Tool that queries the Azure Cognitive Services Text Analytics for Health API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/ai-services/language-service/text-analytics-for-health/quickstart?tabs=windows&pivots=programming-language-python
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_endpoint: str = "" #: :meta private:
text_analytics_client: Any #: :meta private:
name: str = "azure_cognitive_services_text_analyics_health"
description: str = (
"A wrapper around Azure Cognitive Services Text Analytics for Health. "
"Useful for when you need to identify entities in healthcare data. "
"Input should be text."
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_endpoint = get_from_dict_or_env(
values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
)
try:
import azure.ai.textanalytics as sdk
from azure.core.credentials import AzureKeyCredential
values["text_analytics_client"] = sdk.TextAnalyticsClient(
endpoint=azure_cogs_endpoint,
credential=AzureKeyCredential(azure_cogs_key),
)
except ImportError:
raise ImportError(
"azure-ai-textanalytics is not installed. "
"Run `pip install azure-ai-textanalytics` to install."
)
return values
def _text_analysis(self, text: str) -> Dict:
poller = self.text_analytics_client.begin_analyze_healthcare_entities(
[{"id": "1", "language": "en", "text": text}]
)
result = poller.result()
res_dict = {}
docs = [doc for doc in result if not doc.is_error]
if docs is not None:
res_dict["entities"] = [
f"{x.text} is a healthcare entity of type {x.category}"
for y in docs
for x in y.entities
]
return res_dict
def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
formatted_result = []
if "entities" in text_analysis_result:
formatted_result.append(
f"""The text contains the following healthcare entities: {
", ".join(text_analysis_result["entities"])
}""".replace("\n", " ")
)
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
text_analysis_result = self._text_analysis(query)
return self._format_text_analysis_result(text_analysis_result)
except Exception as e:
raise RuntimeError(
f"Error while running AzureCogsTextAnalyticsHealthTool: {e}"
)
|
from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
class AzureCogsTextAnalyticsHealthTool(BaseTool): # type: ignore[override]
"""Tool that queries the Azure Cognitive Services Text Analytics for Health API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/ai-services/language-service/text-analytics-for-health/quickstart?tabs=windows&pivots=programming-language-python
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_endpoint: str = "" #: :meta private:
text_analytics_client: Any #: :meta private:
name: str = "azure_cognitive_services_text_analyics_health"
description: str = (
"A wrapper around Azure Cognitive Services Text Analytics for Health. "
"Useful for when you need to identify entities in healthcare data. "
"Input should be text."
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_endpoint = get_from_dict_or_env(
values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
)
try:
import azure.ai.textanalytics as sdk
from azure.core.credentials import AzureKeyCredential
values["text_analytics_client"] = sdk.TextAnalyticsClient(
endpoint=azure_cogs_endpoint,
credential=AzureKeyCredential(azure_cogs_key),
)
except ImportError:
raise ImportError(
"azure-ai-textanalytics is not installed. "
"Run `pip install azure-ai-textanalytics` to install."
)
return values
def _text_analysis(self, text: str) -> Dict:
poller = self.text_analytics_client.begin_analyze_healthcare_entities(
[{"id": "1", "language": "en", "text": text}]
)
result = poller.result()
res_dict = {}
docs = [doc for doc in result if not doc.is_error]
if docs is not None:
res_dict["entities"] = [
f"{x.text} is a healthcare entity of type {x.category}"
for y in docs
for x in y.entities
]
return res_dict
def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
formatted_result = []
if "entities" in text_analysis_result:
formatted_result.append(
f"""The text contains the following healthcare entities: {
", ".join(text_analysis_result["entities"])
}""".replace("\n", " ")
)
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
text_analysis_result = self._text_analysis(query)
return self._format_text_analysis_result(text_analysis_result)
except Exception as e:
raise RuntimeError(
f"Error while running AzureCogsTextAnalyticsHealthTool: {e}"
)
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "fer2013"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"))
@register_dataset(NAME)
class FER2013(Dataset):
"""FER 2013 Dataset
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_CHECKSUMS = {
"train": "a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10",
"test": "dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3",
}
def _resources(self) -> List[OnlineResource]:
archive = KaggleDownloadResource(
"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge",
file_name=f"{self._split}.csv.zip",
sha256=self._CHECKSUMS[self._split],
)
return [archive]
def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
label_id = data.get("emotion")
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self._categories) if label_id is not None else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 28_709 if self._split == "train" else 3_589
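# --- Hedged sketch (not part of the original file) ---
# Mirrors the per-row parsing done by ``_prepare_sample`` above on a
# hypothetical CSV row: a 48*48 space-separated "pixels" string and an
# "emotion" id (3 maps to "happy" in the categories tuple).
if __name__ == "__main__":
    row = {"emotion": "3", "pixels": " ".join(["0"] * (48 * 48))}
    pixels = torch.tensor([int(v) for v in row["pixels"].split()], dtype=torch.uint8)
    print(pixels.reshape(48, 48).shape, _info()["categories"][int(row["emotion"])])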
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "fer2013"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"))
@register_dataset(NAME)
class FER2013(Dataset):
"""FER 2013 Dataset
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_CHECKSUMS = {
"train": "a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10",
"test": "dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3",
}
def _resources(self) -> List[OnlineResource]:
archive = KaggleDownloadResource(
"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge",
file_name=f"{self._split}.csv.zip",
sha256=self._CHECKSUMS[self._split],
)
return [archive]
def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
label_id = data.get("emotion")
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self._categories) if label_id is not None else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 28_709 if self._split == "train" else 3_589
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
            If None, L2 normalization is not applied to the first input feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
scale (float, optional): Defaults to 20..
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
# normalization layer convert to FP32 in FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
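# --- Hedged sketch (not part of the original file) ---
# Numeric check of the ``L2Norm`` forward pass: every spatial location is
# rescaled channel-wise so its L2 norm over dim 1 equals the (constant-
# initialised) weight. The tensor shape is an assumption for illustration.
if __name__ == '__main__':
    l2 = L2Norm(n_dims=4, scale=20.)
    nn.init.constant_(l2.weight, l2.scale)   # mimic the Constant init used by SSDNeck
    x = torch.randn(2, 4, 8, 8)
    y = l2(x)
    print(float(y.pow(2).sum(1).sqrt().mean()))  # ~20.0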
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
            If None, L2 normalization is not applied to the first input feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
            scale (float, optional): Scaling factor. Defaults to 20.0.
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
        # the normalization layer is computed in FP32 during FP16 (AMP) training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
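# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how SSDNeck could be instantiated; the channel,
# stride and padding values below are assumptions chosen to mimic a typical
# SSD300/VGG16 setup, not values taken from this file.
if __name__ == '__main__':
    ssd_neck = SSDNeck(
        in_channels=(512, 1024),
        out_channels=(512, 1024, 512, 256, 256, 256),
        level_strides=(2, 2, 1, 1),
        level_paddings=(1, 1, 0, 0),
        l2_norm_scale=20.)
    # two backbone levels in, two backbone levels + four extra levels out
    dummy_feats = (torch.rand(1, 512, 38, 38), torch.rand(1, 1024, 19, 19))
    outs = ssd_neck(dummy_feats)
    assert len(outs) == 6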
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
model = dict(data_preprocessor=dict(batch_augments=batch_augments))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
model = dict(data_preprocessor=dict(batch_augments=batch_augments))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
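# Hedged note (not in the original config): assuming the usual Mask R-CNN
# base LR of 0.02 for a total batch size of 16, the linear scaling rule for
# 8 GPUs x 8 samples = 64 images per step gives
#   lr = 0.02 * (64 / 16) = 0.02 * 4 = 0.08,
# which matches the optimizer setting above; `auto_scale_lr` then rescales
# this value automatically if the actual total batch size differs from 64.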
|
import dataclasses
from collections import defaultdict
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.typing import T
from docarray.document.strawberry_type import StrawberryDocument
class StrawberryMixin:
"""Provide helper functions to convert to/from a Strawberry model"""
def to_strawberry_type(self) -> 'StrawberryDocument':
"""Convert a Document object into a Strawberry type."""
from docarray.document.strawberry_type import StrawberryDocument as SD
from docarray.document.strawberry_type import _NameScoreItem, _NamedScore
_p_dict = {}
for f in self.non_empty_fields:
v = getattr(self, f)
if f in ('matches', 'chunks'):
_p_dict[f] = v.to_strawberry_type()
elif f in ('scores', 'evaluations'):
_p_dict[f] = [
_NameScoreItem(name=k, score=_NamedScore(**v.to_dict()))
for k, v in v.items()
]
else:
_p_dict[f] = v
return SD(**_p_dict)
@classmethod
def from_strawberry_type(cls: Type['T'], model) -> 'T':
"""Build a Document object from a Strawberry model
:param model: the Strawberry data model object that represents a Document
:return: a Document object
"""
from docarray import Document
fields = {}
_field_chunks, _field_matches = None, None
if model.chunks:
_field_chunks = [Document.from_strawberry_type(d) for d in model.chunks]
if model.matches:
_field_matches = [Document.from_strawberry_type(d) for d in model.matches]
for field in dataclasses.fields(model):
f_name = field.name
value = getattr(model, f_name)
if value is None:
continue
if f_name == 'scores' or f_name == 'evaluations':
from docarray.score import NamedScore
from docarray.document.strawberry_type import _NameScoreItem
value: List[_NameScoreItem]
fields[f_name] = defaultdict(NamedScore)
for v in value:
fields[f_name][v.name] = NamedScore(**dataclasses.asdict(v.score))
else:
fields[f_name] = value
d = Document(**fields)
if _field_chunks:
d.chunks = _field_chunks
if _field_matches:
d.matches = _field_matches
return d
|
import dataclasses
from collections import defaultdict
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.typing import T
from docarray.document.strawberry_type import StrawberryDocument
class StrawberryMixin:
"""Provide helper functions to convert to/from a Strawberry model"""
def to_strawberry_type(self) -> 'StrawberryDocument':
"""Convert a Document object into a Strawberry type."""
from docarray.document.strawberry_type import StrawberryDocument as SD
from docarray.document.strawberry_type import _NameScoreItem, _NamedScore
_p_dict = {}
for f in self.non_empty_fields:
v = getattr(self, f)
if f in ('matches', 'chunks'):
_p_dict[f] = v.to_strawberry_type()
elif f in ('scores', 'evaluations'):
_p_dict[f] = [
_NameScoreItem(k, _NamedScore(**v.to_dict())) for k, v in v.items()
]
else:
_p_dict[f] = v
return SD(**_p_dict)
@classmethod
def from_strawberry_type(cls: Type['T'], model) -> 'T':
"""Build a Document object from a Strawberry model
:param model: the Strawberry data model object that represents a Document
:return: a Document object
"""
from docarray import Document
fields = {}
_field_chunks, _field_matches = None, None
if model.chunks:
_field_chunks = [Document.from_strawberry_type(d) for d in model.chunks]
if model.matches:
_field_matches = [Document.from_strawberry_type(d) for d in model.matches]
for field in dataclasses.fields(model):
f_name = field.name
value = getattr(model, f_name)
if value is None:
continue
if f_name == 'scores' or f_name == 'evaluations':
from docarray.score import NamedScore
from docarray.document.strawberry_type import _NameScoreItem
value: List[_NameScoreItem]
fields[f_name] = defaultdict(NamedScore)
for v in value:
fields[f_name][v.name] = NamedScore(**dataclasses.asdict(v.score))
else:
fields[f_name] = value
d = Document(**fields)
if _field_chunks:
d.chunks = _field_chunks
if _field_matches:
d.matches = _field_matches
return d
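# --- Hedged usage sketch (assumes docarray with its Strawberry/GraphQL extras installed) ---
# Round-trips a Document through the Strawberry type to illustrate that
# `to_strawberry_type` and `from_strawberry_type` are inverses for the fields
# they cover; the texts used here are made up for the example.
if __name__ == '__main__':
    from docarray import Document

    doc = Document(text='hello', chunks=[Document(text='world')])
    strawberry_doc = doc.to_strawberry_type()
    restored = Document.from_strawberry_type(strawberry_doc)
    assert restored.text == 'hello'
    assert restored.chunks[0].text == 'world'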
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/MOT17/'
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args, to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(1088, 1088),
ratio_range=(0.8, 1.2),
keep_ratio=True,
clip_object_border=False),
dict(type='PhotoMetricDistortion'),
dict(type='RandomCrop', crop_size=(1088, 1088), bbox_clip_border=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1088, 1088), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/half-train_cocoformat.json',
data_prefix=dict(img='train/'),
metainfo=dict(classes=('pedestrian', )),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img='train/'),
metainfo=dict(classes=('pedestrian', )),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/half-val_cocoformat.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/MOT17/'
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(1088, 1088),
ratio_range=(0.8, 1.2),
keep_ratio=True,
clip_object_border=False),
dict(type='PhotoMetricDistortion'),
dict(type='RandomCrop', crop_size=(1088, 1088), bbox_clip_border=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1088, 1088), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/half-train_cocoformat.json',
data_prefix=dict(img='train/'),
metainfo=dict(classes=('pedestrian', )),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img='train/'),
metainfo=dict(classes=('pedestrian', )),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/half-val_cocoformat.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
|
import os
import pathlib
import sys
def get_frontend_path() -> pathlib.Path:
if getattr(sys, "frozen", False):
# The application is frozen
datadir = pathlib.Path(os.path.dirname(sys.executable)) / "example_files"
else:
# The application is not frozen
# Change this bit to match where you store your data files:
filedir = os.path.dirname(__file__)
datadir = pathlib.Path(filedir).parent.parent.parent / "example_files"
return pathlib.Path(datadir)
def get_data_path() -> pathlib.Path:
if getattr(sys, "frozen", False):
# The application is frozen
datadir = os.path.dirname(sys.executable)
else:
# The application is not frozen
# Change this bit to match where you store your data files:
filedir = os.path.dirname(__file__)
datadir = pathlib.Path(filedir).parent.parent
return pathlib.Path(datadir)
|
import os
import pathlib
import sys
def get_secrets_path() -> pathlib.Path:
return get_data_path() / "secrets"
def get_config_path() -> pathlib.Path:
return get_data_path()
def get_frontend_path() -> pathlib.Path:
if getattr(sys, "frozen", False):
# The application is frozen
datadir = pathlib.Path(os.path.dirname(sys.executable)) / "example_files"
else:
# The application is not frozen
# Change this bit to match where you store your data files:
filedir = os.path.dirname(__file__)
datadir = pathlib.Path(filedir).parent.parent.parent / "example_files"
return pathlib.Path(datadir)
def get_data_path() -> pathlib.Path:
if getattr(sys, "frozen", False):
# The application is frozen
datadir = os.path.dirname(sys.executable)
else:
# The application is not frozen
# Change this bit to match where you store your data files:
filedir = os.path.dirname(__file__)
datadir = pathlib.Path(filedir).parent.parent
return pathlib.Path(datadir)
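# --- Hedged usage sketch ---
# The helpers above resolve directories relative to either the frozen
# executable or the source tree; the print-out below is purely illustrative.
if __name__ == "__main__":
    print("data dir:    ", get_data_path())
    print("frontend dir:", get_frontend_path())
    print("secrets dir: ", get_secrets_path())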
|
import os
from pathlib import Path
from typing import List, Optional, Tuple
import requests
def get_file_content(url: str, path: str) -> Tuple[str, int]:
"""Get the content of a file from the GitHub REST API."""
resp = requests.get(url + path)
return resp.text, resp.status_code
def get_file_content_bytes(url: str, path: str) -> Tuple[bytes, int]:
"""Get the content of a file from the GitHub REST API."""
resp = requests.get(url + path)
return resp.content, resp.status_code
def get_exports(raw_content: str) -> List:
"""
    Read the content of a Python file and return a list of exported class names.
For example:
```python
from .a import A
from .b import B
__all__ = ["A", "B"]
```
will return `["A", "B"]`.
Args:
- raw_content: The content of a Python file as a string.
Returns:
A list of exported class names.
"""
exports = []
for line in raw_content.splitlines():
line = line.strip()
if line.startswith("__all__"):
exports = line.split("=")[1].strip().strip("[").strip("]").split(",")
exports = [export.strip().strip("'").strip('"') for export in exports]
return exports
def rewrite_exports(exports: List[str], dirpath: str) -> None:
"""
Write the `__all__` variable to the `__init__.py` file in the modules dir.
Removes the line that contains `__all__` and appends a new line with the updated
`__all__` variable.
Args:
- exports: A list of exported class names.
"""
init_path = f"{dirpath}/__init__.py"
with open(init_path) as f:
lines = f.readlines()
with open(init_path, "w") as f:
for line in lines:
line = line.strip()
if line.startswith("__all__"):
continue
f.write(line + os.linesep)
f.write(f"__all__ = {list(set(exports))}" + os.linesep)
def initialize_directory(
custom_path: Optional[str] = None, custom_dir: Optional[str] = None
) -> Path:
"""Initialize directory."""
if custom_path is not None and custom_dir is not None:
raise ValueError(
"You cannot specify both `custom_path` and `custom_dir` at the same time."
)
custom_dir = custom_dir or "llamadatasets"
if custom_path is not None:
dirpath = Path(custom_path)
else:
dirpath = Path(__file__).parent / custom_dir
if not os.path.exists(dirpath):
# Create a new directory because it does not exist
os.makedirs(dirpath)
return dirpath
def get_source_files_list(source_tree_url: str, path: str) -> List[str]:
"""Get the list of source files to download."""
resp = requests.get(
source_tree_url + path + "?recursive=1", headers={"Accept": "application/json"}
)
payload = resp.json()["payload"]
return [item["name"] for item in payload["tree"]["items"]]
def recursive_tree_traverse(
tree_urls: List[str], acc: List[str], source_tree_url: str
) -> List[str]:
"""Recursively traversge Github trees to get all file paths in a folder."""
if not tree_urls:
return acc
else:
url = tree_urls[0]
try:
res = requests.get(url, headers={"Accept": "application/json"})
tree_elements = res.json()["payload"]["tree"]["items"]
except Exception:
raise ValueError("Failed to traverse github tree source.")
new_trees = [
source_tree_url + "/" + el["path"]
for el in tree_elements
if el["contentType"] == "directory"
]
acc += [
el["path"].replace("llama-index-packs/", "/")
for el in tree_elements
if el["contentType"] == "file"
]
return recursive_tree_traverse(
tree_urls=tree_urls[1:] + new_trees,
acc=acc,
source_tree_url=source_tree_url,
)
def get_source_files_recursive(source_tree_url: str, path: str) -> List[str]:
"""Get source files of a Github folder recursively."""
initial_url = source_tree_url + path + "?recursive=1"
initial_tree_urls = [initial_url]
return recursive_tree_traverse(initial_tree_urls, [], source_tree_url)
class ChangeDirectory:
"""Context manager for changing the current working directory."""
def __init__(self, new_path: str):
self.new_path = os.path.expanduser(new_path)
def __enter__(self) -> None:
self.saved_path = os.getcwd()
os.chdir(self.new_path)
def __exit__(self, etype: object, value: object, traceback: object) -> None:
os.chdir(self.saved_path)
|
import os
from pathlib import Path
from typing import List, Optional, Tuple
import requests
def get_file_content(url: str, path: str) -> Tuple[str, int]:
"""Get the content of a file from the GitHub REST API."""
resp = requests.get(url + path)
return resp.text, resp.status_code
def get_file_content_bytes(url: str, path: str) -> Tuple[bytes, int]:
"""Get the content of a file from the GitHub REST API."""
resp = requests.get(url + path)
return resp.content, resp.status_code
def get_exports(raw_content: str) -> List:
"""Read content of a Python file and returns a list of exported class names.
For example:
```python
from .a import A
from .b import B
__all__ = ["A", "B"]
```
will return `["A", "B"]`.
Args:
- raw_content: The content of a Python file as a string.
Returns:
A list of exported class names.
"""
exports = []
for line in raw_content.splitlines():
line = line.strip()
if line.startswith("__all__"):
exports = line.split("=")[1].strip().strip("[").strip("]").split(",")
exports = [export.strip().strip("'").strip('"') for export in exports]
return exports
def rewrite_exports(exports: List[str], dirpath: str) -> None:
"""Write the `__all__` variable to the `__init__.py` file in the modules dir.
Removes the line that contains `__all__` and appends a new line with the updated
`__all__` variable.
Args:
- exports: A list of exported class names.
"""
init_path = f"{dirpath}/__init__.py"
with open(init_path) as f:
lines = f.readlines()
with open(init_path, "w") as f:
for line in lines:
line = line.strip()
if line.startswith("__all__"):
continue
f.write(line + os.linesep)
f.write(f"__all__ = {list(set(exports))}" + os.linesep)
def initialize_directory(
custom_path: Optional[str] = None, custom_dir: Optional[str] = None
) -> Path:
"""Initialize directory."""
if custom_path is not None and custom_dir is not None:
raise ValueError(
"You cannot specify both `custom_path` and `custom_dir` at the same time."
)
custom_dir = custom_dir or "llamadatasets"
if custom_path is not None:
dirpath = Path(custom_path)
else:
dirpath = Path(__file__).parent / custom_dir
if not os.path.exists(dirpath):
# Create a new directory because it does not exist
os.makedirs(dirpath)
return dirpath
def get_source_files_list(source_tree_url: str, path: str) -> List[str]:
"""Get the list of source files to download."""
resp = requests.get(
source_tree_url + path + "?recursive=1", headers={"Accept": "application/json"}
)
payload = resp.json()["payload"]
return [item["name"] for item in payload["tree"]["items"]]
def recursive_tree_traverse(
tree_urls: List[str], acc: List[str], source_tree_url: str
) -> List[str]:
"""Recursively traversge Github trees to get all file paths in a folder."""
if not tree_urls:
return acc
else:
url = tree_urls[0]
try:
res = requests.get(url, headers={"Accept": "application/json"})
tree_elements = res.json()["payload"]["tree"]["items"]
except Exception:
raise ValueError("Failed to traverse github tree source.")
new_trees = [
source_tree_url + "/" + el["path"]
for el in tree_elements
if el["contentType"] == "directory"
]
acc += [
el["path"].replace("llama-index-packs/", "/")
for el in tree_elements
if el["contentType"] == "file"
]
return recursive_tree_traverse(
tree_urls=tree_urls[1:] + new_trees,
acc=acc,
source_tree_url=source_tree_url,
)
def get_source_files_recursive(source_tree_url: str, path: str) -> List[str]:
"""Get source files of a Github folder recursively."""
initial_url = source_tree_url + path + "?recursive=1"
initial_tree_urls = [initial_url]
return recursive_tree_traverse(initial_tree_urls, [], source_tree_url)
class ChangeDirectory:
"""Context manager for changing the current working directory."""
def __init__(self, new_path: str):
self.new_path = os.path.expanduser(new_path)
def __enter__(self) -> None:
self.saved_path = os.getcwd()
os.chdir(self.new_path)
def __exit__(self, etype: object, value: object, traceback: object) -> None:
os.chdir(self.saved_path)
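# --- Hedged usage sketch ---
# Exercises `get_exports` on an in-memory module body and `ChangeDirectory`
# as a context manager; the file contents and the "." path are made up for
# illustration only.
if __name__ == "__main__":
    sample = 'from .a import A\nfrom .b import B\n__all__ = ["A", "B"]\n'
    assert get_exports(sample) == ["A", "B"]
    with ChangeDirectory("."):
        print("temporarily working in:", os.getcwd())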
|
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.gateway import BaseGateway
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Gateway YAML parser given the syntax version number
"""
from jina.logging.predefined import default_logger
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
:return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
|
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.gateway import BaseGateway
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
        :param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
        :return: the Gateway YAML parser given the syntax version number
"""
from jina.logging.predefined import default_logger
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
        :return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
|
"""
Feature engineering pipeline for categorical data
=================================================
The script showcases how to keep the categorical data encoding consistent across
training and inference. There are many ways to attain the same goal; this script can be
used as a starting point.
See Also
--------
- :doc:`Tutorial </tutorials/categorical>`
- :ref:`sphx_glr_python_examples_categorical.py`
- :ref:`sphx_glr_python_examples_cat_in_the_dat.py`
"""
from typing import List, Tuple
import numpy as np
import pandas as pd
from sklearn.compose import make_column_selector, make_column_transformer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
import xgboost as xgb
def make_example_data() -> Tuple[pd.DataFrame, pd.Series, List[str]]:
"""Generate data for demo."""
n_samples = 2048
rng = np.random.default_rng(1994)
# We have three categorical features, while the rest are numerical.
categorical_features = ["brand_id", "retailer_id", "category_id"]
df = pd.DataFrame(
        rng.integers(32, 96, size=(n_samples, 3)),
columns=categorical_features,
)
df["price"] = rng.integers(100, 200, size=(n_samples,))
df["stock_status"] = rng.choice([True, False], n_samples)
df["on_sale"] = rng.choice([True, False], n_samples)
df["label"] = rng.normal(loc=0.0, scale=1.0, size=n_samples)
X = df.drop(["label"], axis=1)
y = df["label"]
return X, y, categorical_features
def native() -> None:
"""Using the native XGBoost interface."""
X, y, cat_feats = make_example_data()
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1994, test_size=0.2
)
# Create an encoder based on training data.
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan)
enc.set_output(transform="pandas")
enc = enc.fit(X_train[cat_feats])
def enc_transform(X: pd.DataFrame) -> pd.DataFrame:
        # don't modify the input in place so that the encoding can be re-run for demonstration
X = X.copy()
cat_cols = enc.transform(X[cat_feats])
for i, name in enumerate(cat_feats):
# create pd.Series based on the encoder
cat_cols[name] = pd.Categorical.from_codes(
codes=cat_cols[name].astype(np.int32), categories=enc.categories_[i]
)
X[cat_feats] = cat_cols
return X
# Encode the data based on fitted encoder.
X_train_enc = enc_transform(X_train)
X_test_enc = enc_transform(X_test)
# Train XGBoost model using the native interface.
Xy_train = xgb.QuantileDMatrix(X_train_enc, y_train, enable_categorical=True)
Xy_test = xgb.QuantileDMatrix(
X_test_enc, y_test, enable_categorical=True, ref=Xy_train
)
booster = xgb.train({}, Xy_train)
booster.predict(Xy_test)
# Following shows that data are encoded consistently.
# We first obtain result from newly encoded data
predt0 = booster.inplace_predict(enc_transform(X_train.head(16)))
# then we obtain result from already encoded data from training.
predt1 = booster.inplace_predict(X_train_enc.head(16))
np.testing.assert_allclose(predt0, predt1)
def pipeline() -> None:
"""Using the sklearn pipeline."""
X, y, cat_feats = make_example_data()
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=3, test_size=0.2
)
enc = make_column_transformer(
(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan),
# all categorical feature names end with "_id"
make_column_selector(pattern=".*_id"),
),
remainder="passthrough",
verbose_feature_names_out=False,
)
# No need to set pandas output, we use `feature_types` to indicate the type of
# features.
# enc.set_output(transform="pandas")
feature_types = ["c" if fn in cat_feats else "q" for fn in X_train.columns]
reg = xgb.XGBRegressor(
feature_types=feature_types, enable_categorical=True, n_estimators=10
)
p = make_pipeline(enc, reg)
p.fit(X_train, y_train)
# check XGBoost is using the feature type correctly.
model_types = reg.get_booster().feature_types
assert model_types is not None
for a, b in zip(model_types, feature_types):
assert a == b
# Following shows that data are encoded consistently.
# We first create a slice of data that doesn't contain all the categories
predt0 = p.predict(X_train.iloc[:16, :])
# Then we use the dataframe that contains all the categories
predt1 = p.predict(X_train)[:16]
# The resulting encoding is the same
np.testing.assert_allclose(predt0, predt1)
if __name__ == "__main__":
pipeline()
native()
|
"""
Feature engineering pipeline for categorical data
=================================================
The script showcases how to keep the categorical data encoding consistent across
training and inference. There are many ways to attain the same goal; this script can be
used as a starting point.
See Also
--------
- :doc:`Tutorial </tutorials/categorical>`
- :ref:`sphx_glr_python_examples_categorical.py`
- :ref:`sphx_glr_python_examples_cat_in_the_dat.py`
"""
from typing import List, Tuple
import numpy as np
import pandas as pd
from sklearn.compose import make_column_selector, make_column_transformer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
import xgboost as xgb
def make_example_data() -> Tuple[pd.DataFrame, pd.Series, List[str]]:
"""Generate data for demo."""
n_samples = 2048
rng = np.random.default_rng(1994)
# We have three categorical features, while the rest are numerical.
categorical_features = ["brand_id", "retailer_id", "category_id"]
df = pd.DataFrame(
        rng.integers(32, 96, size=(n_samples, 3)),
columns=categorical_features,
)
df["price"] = rng.integers(100, 200, size=(n_samples,))
df["stock_status"] = rng.choice([True, False], n_samples)
df["on_sale"] = rng.choice([True, False], n_samples)
df["label"] = rng.normal(loc=0.0, scale=1.0, size=n_samples)
X = df.drop(["label"], axis=1)
y = df["label"]
return X, y, categorical_features
def native() -> None:
"""Using the native XGBoost interface."""
X, y, cat_feats = make_example_data()
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1994, test_size=0.2
)
# Create an encoder based on training data.
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan)
enc.set_output(transform="pandas")
enc = enc.fit(X_train[cat_feats])
def enc_transform(X: pd.DataFrame) -> pd.DataFrame:
        # don't modify the input in place so that the encoding can be re-run for demonstration
X = X.copy()
cat_cols = enc.transform(X[cat_feats])
for i, name in enumerate(cat_feats):
# create pd.Series based on the encoder
cat_cols[name] = pd.Categorical.from_codes(
codes=cat_cols[name].astype(np.int32), categories=enc.categories_[i]
)
X[cat_feats] = cat_cols
return X
# Encode the data based on fitted encoder.
X_train_enc = enc_transform(X_train)
X_test_enc = enc_transform(X_test)
# Train XGBoost model using the native interface.
Xy_train = xgb.QuantileDMatrix(X_train_enc, y_train, enable_categorical=True)
Xy_test = xgb.QuantileDMatrix(
X_test_enc, y_test, enable_categorical=True, ref=Xy_train
)
booster = xgb.train({}, Xy_train)
booster.predict(Xy_test)
# Following shows that data are encoded consistently.
# We first obtain result from newly encoded data
predt0 = booster.inplace_predict(enc_transform(X_train.head(16)))
# then we obtain result from already encoded data from training.
predt1 = booster.inplace_predict(X_train_enc.head(16))
np.testing.assert_allclose(predt0, predt1)
def pipeline() -> None:
"""Using the sklearn pipeline."""
X, y, cat_feats = make_example_data()
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=3, test_size=0.2
)
enc = make_column_transformer(
(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan),
# all categorical feature names end with "_id"
make_column_selector(pattern=".*_id"),
),
remainder="passthrough",
verbose_feature_names_out=False,
)
# No need to set pandas output, we use `feature_types` to indicate the type of
# features.
# enc.set_output(transform="pandas")
feature_types = ["c" if fn in cat_feats else "q" for fn in X_train.columns]
reg = xgb.XGBRegressor(
feature_types=feature_types, enable_categorical=True, n_estimators=10
)
p = make_pipeline(enc, reg)
p.fit(X_train, y_train)
# check XGBoost is using the feature type correctly.
model_types = reg.get_booster().feature_types
assert model_types is not None
for a, b in zip(model_types, feature_types):
assert a == b
# Following shows that data are encoded consistently.
# We first create a slice of data that doesn't contain all the categories
predt0 = p.predict(X_train.iloc[:16, :])
# Then we use the dataframe that contains all the categories
predt1 = p.predict(X_train)[:16]
# The resulting encoding is the same
np.testing.assert_allclose(predt0, predt1)
if __name__ == "__main__":
pipeline()
native()
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x9e\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12(\n\x06tensor\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x0c\n\x02id\x18\t \x01(\tH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 517
_DOCUMENTPROTO._serialized_start = 520
_DOCUMENTPROTO._serialized_end = 650
_DOCUMENTPROTO_DATAENTRY._serialized_start = 586
_DOCUMENTPROTO_DATAENTRY._serialized_end = 650
_DOCUMENTARRAYPROTO._serialized_start = 652
_DOCUMENTARRAYPROTO._serialized_end = 711
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xbb\x01\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12(\n\x06tensor\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start=58
_DENSENDARRAYPROTO._serialized_end=123
_NDARRAYPROTO._serialized_start=125
_NDARRAYPROTO._serialized_end=228
_NODEPROTO._serialized_start=231
_NODEPROTO._serialized_end=418
_DOCUMENTPROTO._serialized_start=421
_DOCUMENTPROTO._serialized_end=551
_DOCUMENTPROTO_DATAENTRY._serialized_start=487
_DOCUMENTPROTO_DATAENTRY._serialized_end=551
_DOCUMENTARRAYPROTO._serialized_start=553
_DOCUMENTARRAYPROTO._serialized_end=612
# @@protoc_insertion_point(module_scope)
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
|
_base_ = './cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py' # noqa: E501
model = dict(
roi_head=dict(
mask_head=dict(
predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
|
_base_ = './cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501
model = dict(
roi_head=dict(
mask_head=dict(
predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
|
from typing import List, Union
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
from typing import Union, List
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str = '', texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
        :param guid: id for the example
        :param texts: the texts for the example
        :param label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
|
import importlib.util
from typing import List, Dict, Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class DuckDuckGoSearchToolSpec(BaseToolSpec):
"""DuckDuckGoSearch tool spec."""
spec_functions = ["duckduckgo_instant_search", "duckduckgo_full_search"]
def __init__(self) -> None:
if not importlib.util.find_spec("duckduckgo_search"):
raise ImportError(
"DuckDuckGoSearchToolSpec requires the duckduckgo_search package to be installed."
)
super().__init__()
def duckduckgo_instant_search(self, query: str) -> List[Dict]:
"""
        Make a query to the DuckDuckGo API to receive an instant answer.
Args:
query (str): The query to be passed to DuckDuckGo.
"""
from duckduckgo_search import DDGS
with DDGS() as ddg:
return list(ddg.answers(query))
def duckduckgo_full_search(
self,
query: str,
region: Optional[str] = "wt-wt",
max_results: Optional[int] = 10,
) -> List[Dict]:
"""
        Make a query to DuckDuckGo search to receive full search results.
Args:
query (str): The query to be passed to DuckDuckGo.
            region (Optional[str]): The region to be used for the search in [country-language] convention, e.g. us-en, uk-en, ru-ru, etc.
max_results (Optional[int]): The maximum number of results to be returned.
"""
from duckduckgo_search import DDGS
params = {
"keywords": query,
"region": region,
"max_results": max_results,
}
with DDGS() as ddg:
return list(ddg.text(**params))
|
import importlib.util
from typing import List, Dict, Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class DuckDuckGoSearchToolSpec(BaseToolSpec):
"""DuckDuckGoSearch tool spec."""
spec_functions = ["duckduckgo_instant_search", "duckduckgo_full_search"]
def __init__(self) -> None:
if not importlib.util.find_spec("duckduckgo_search"):
raise ImportError(
"DuckDuckGoSearchToolSpec requires the duckduckgo_search package to be installed."
)
super().__init__()
def duckduckgo_instant_search(self, query: str) -> List[Dict]:
"""
        Make a query to the DuckDuckGo API to receive an instant answer.
Args:
query (str): The query to be passed to DuckDuckGo.
"""
from duckduckgo_search import DDGS
with DDGS() as ddg:
return list(ddg.answers(query))
def duckduckgo_full_search(
self,
query: str,
region: Optional[str] = "wt-wt",
max_results: Optional[int] = 10,
) -> List[Dict]:
"""
        Make a query to DuckDuckGo search to receive full search results.
Args:
query (str): The query to be passed to DuckDuckGo.
            region (Optional[str]): The region to be used for the search in [country-language] convention, e.g. us-en, uk-en, ru-ru, etc.
max_results (Optional[int]): The maximum number of results to be returned.
"""
from duckduckgo_search import DDGS
params = {
"keywords": query,
"region": region,
"max_results": max_results,
}
with DDGS() as ddg:
return list(ddg.text(**params))
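# --- Hedged usage sketch (assumes `duckduckgo_search` is installed and network access is available) ---
# The keys accessed on each hit ("title", "href") follow the usual
# duckduckgo_search text-result layout and are assumptions for illustration.
if __name__ == "__main__":
    tool_spec = DuckDuckGoSearchToolSpec()
    hits = tool_spec.duckduckgo_full_search("llama index", region="us-en", max_results=3)
    for hit in hits:
        print(hit.get("title"), "->", hit.get("href"))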
|
from typing import Any, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.tv_tensors._bounding_boxes import CLAMPING_MODE_TYPE
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
self.format = format
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value, arg-type]
class ClampBoundingBoxes(Transform):
"""Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
Args:
        clamping_mode: TODOBB more docs. Default is "auto", which relies on the input box's ``clamping_mode`` attribute.
"""
def __init__(self, clamping_mode: Union[CLAMPING_MODE_TYPE, str] = "auto") -> None:
super().__init__()
self.clamping_mode = clamping_mode
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt, clamping_mode=self.clamping_mode) # type: ignore[return-value]
class ClampKeyPoints(Transform):
"""Clamp keypoints to their corresponding image dimensions.
The clamping is done according to the keypoints' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.KeyPoints,)
def transform(self, inpt: tv_tensors.KeyPoints, params: dict[str, Any]) -> tv_tensors.KeyPoints:
return F.clamp_keypoints(inpt) # type: ignore[return-value]
class SetClampingMode(Transform):
"""TODOBB"""
def __init__(self, clamping_mode: CLAMPING_MODE_TYPE) -> None:
super().__init__()
self.clamping_mode = clamping_mode
if self.clamping_mode not in (None, "soft", "hard"):
raise ValueError(f"clamping_mode must be soft, hard or None, got {clamping_mode}")
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
out: tv_tensors.BoundingBoxes = inpt.clone() # type: ignore[assignment]
out.clamping_mode = self.clamping_mode
return out
|
from typing import Any, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.tv_tensors._bounding_boxes import CLAMPING_MODE_TYPE
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
self.format = format
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value, arg-type]
class ClampBoundingBoxes(Transform):
"""Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
Args:
        clamping_mode: TODOBB more docs. Default is "auto", which relies on the input box's ``clamping_mode`` attribute.
"""
    # TODOBB consider "auto" to be a Literal, make sure torchscript is still happy
# TODOBB validate clamping_mode
def __init__(self, clamping_mode: Union[CLAMPING_MODE_TYPE, str] = "auto") -> None:
super().__init__()
self.clamping_mode = clamping_mode
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt, clamping_mode=self.clamping_mode) # type: ignore[return-value]
class ClampKeyPoints(Transform):
"""Clamp keypoints to their corresponding image dimensions.
The clamping is done according to the keypoints' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.KeyPoints,)
def transform(self, inpt: tv_tensors.KeyPoints, params: dict[str, Any]) -> tv_tensors.KeyPoints:
return F.clamp_keypoints(inpt) # type: ignore[return-value]
class SetClampingMode(Transform):
"""TODOBB"""
def __init__(self, clamping_mode: CLAMPING_MODE_TYPE) -> None:
super().__init__()
# TODOBB validate mode
self.clamping_mode = clamping_mode
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
out: tv_tensors.BoundingBoxes = inpt.clone() # type: ignore[assignment]
out.clamping_mode = self.clamping_mode
return out
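# --- Hedged usage sketch ---
# Converts a single XYWH box to XYXY; the coordinates and canvas size are
# arbitrary example values, and ClampBoundingBoxes/SetClampingMode are left
# out because their clamping_mode handling is still marked TODOBB above.
if __name__ == "__main__":
    import torch

    boxes = tv_tensors.BoundingBoxes(
        torch.tensor([[10.0, 20.0, 30.0, 40.0]]),
        format=tv_tensors.BoundingBoxFormat.XYWH,
        canvas_size=(32, 32),
    )
    out = ConvertBoundingBoxFormat("XYXY")(boxes)
    print(out, out.format)  # expected box: [10., 20., 40., 60.]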
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from ..builder import NECKS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@NECKS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
        block_mid_channels (int): The number of middle block output channels.
num_residual_blocks (int): The number of residual blocks.
block_dilations (list): The list of residual blocks dilation.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks, block_dilations):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = block_dilations
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
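# Hedged usage sketch (assumes mmdet/mmcv matching the imports above): feed a
# single C5-like feature map through the encoder.
if __name__ == '__main__':
    import torch
    encoder = DilatedEncoder(
        in_channels=2048,        # e.g. C5 channels of a ResNet-50 backbone
        out_channels=512,
        block_mid_channels=128,
        num_residual_blocks=4,
        block_dilations=[2, 4, 6, 8])
    encoder.init_weights()
    feats = (torch.randn(1, 2048, 32, 32), )  # tuple of backbone outputs
    out, = encoder(feats)                     # forward() only uses feature[-1]
    print(out.shape)                          # torch.Size([1, 512, 32, 32])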
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from ..builder import NECKS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@NECKS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels.
num_residual_blocks (int): The number of residual blocks.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = [2, 4, 6, 8]
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
|
import os
from typing import Dict, List, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPTokenizer, CLIPModel
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using a CLIP model.
:param pretrained_model_name_or_path: Can be either:
- A string, the model id of a pretrained CLIP model hosted
inside a model repo on huggingface.co, e.g., 'openai/clip-vit-base-patch32'
- A path to a directory containing model weights saved, e.g., ./my_model_directory/
:param base_tokenizer_model: Base tokenizer model.
Defaults to ``pretrained_model_name_or_path`` if None
:param max_length: Max length argument for the tokenizer.
All CLIP models use 77 as the max length
:param device: Device to be used. Use 'cuda' for GPU.
:param default_traversal_paths: Default traversal paths for encoding, used if the
traversal path is not passed as a parameter with the request.
:param default_batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
:param args: Arguments
:param kwargs: Keyword Arguments
"""
def __init__(
self,
pretrained_model_name_or_path: str = 'openai/clip-vit-base-patch32',
base_tokenizer_model: Optional[str] = None,
max_length: Optional[int] = 77,
device: str = 'cpu',
default_traversal_paths: List[str] = ['r'],
default_batch_size: int = 32,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.base_tokenizer_model = (
base_tokenizer_model or pretrained_model_name_or_path
)
self.max_length = max_length
self.logger = JinaLogger(self.__class__.__name__)
if device.startswith('cuda') and not torch.cuda.is_available():
self.logger.warning(
'You tried to use GPU but torch did not detect your '
'GPU correctly. Defaulting to CPU. Check your CUDA installation!'
)
device = 'cpu'
self.device = device
self.tokenizer = CLIPTokenizer.from_pretrained(self.base_tokenizer_model)
self.model = CLIPModel.from_pretrained(self.pretrained_model_name_or_path)
self.model.eval().to(torch.device(device))
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode text data into a ndarray of `D` as dimension, and fill
the embedding attribute of the docs.
:param docs: DocumentArray containing text
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
for document_batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
text_batch = document_batch.get_attributes('text')
with torch.no_grad():
input_tokens = self._generate_input_tokens(text_batch)
embedding_batch = self.model.get_text_features(**input_tokens)
numpy_embedding_batch = embedding_batch.cpu().numpy()
for document, numpy_embedding in zip(
document_batch, numpy_embedding_batch
):
document.embedding = numpy_embedding
def _generate_input_tokens(self, texts):
input_tokens = self.tokenizer(
texts,
max_length=self.max_length,
padding='longest',
truncation=True,
return_tensors='pt',
)
input_tokens = {
k: v.to(torch.device(self.device)) for k, v in input_tokens.items()
}
return input_tokens
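# Hedged usage sketch (assumes a Jina 2.x environment matching the imports
# above and network access to download the CLIP weights).
if __name__ == '__main__':
    from jina import Document, Flow
    f = Flow().add(uses=CLIPTextEncoder)
    with f:
        resp = f.post(
            on='/encode',
            inputs=DocumentArray([Document(text='hello world')]),
            return_results=True,
        )
        print(resp[0].docs[0].embedding.shape)  # (512,) for ViT-B/32 text features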
|
from jina import DocumentArray, Executor, requests
import torch
import clip
from typing import Iterable, Optional, List
from jina_commons.batching import get_docs_batch_generator
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using a CLIP model.
:param model_name: The name of one of the pre-trained CLIP models.
Can also be a path to a local checkpoint (a ``.pt`` file).
:param default_batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
:param default_traversal_paths: Default traversal paths for encoding, used if the
traversal path is not passed as a parameter with the request.
:param default_device: The device (cpu or gpu) that the model should be on.
:param jit: Whether a JIT version of the model should be loaded.
"""
def __init__(
self,
model_name: str = 'ViT-B/32',
default_batch_size: int = 32,
default_traversal_paths: List[str] = ['r'],
default_device: str = 'cpu',
jit: bool = True,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.device = default_device
self.model, _ = clip.load(model_name, self.device, jit)
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have text.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
with torch.no_grad():
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
tensor = clip.tokenize(text_batch).to(self.device)
embedding_batch = self.model.encode_text(tensor)
numpy_embedding_batch = embedding_batch.cpu().numpy()
for document, numpy_embedding in zip(
document_batch, numpy_embedding_batch
):
document.embedding = numpy_embedding
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework._py_context_manager."""
from tensorflow.python.framework import _py_context_manager
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class TestContextManager(object):
def __init__(self, behavior="basic"):
self.log = []
self.behavior = behavior
def __enter__(self):
self.log.append("__enter__()")
if self.behavior == "raise_from_enter":
raise ValueError("exception in __enter__")
return "var"
def __exit__(self, ex_type, ex_value, ex_tb):
self.log.append("__exit__(%s, %s, %s)" % (ex_type, ex_value, ex_tb))
if self.behavior == "raise_from_exit":
raise ValueError("exception in __exit__")
if self.behavior == "suppress_exception":
return True
# Expected log when the body doesn't raise an exception.
NO_EXCEPTION_LOG = """\
__enter__()
body('var')
__exit__(None, None, None)"""
# Expected log when the body does raise an exception. (Regular expression.)
EXCEPTION_LOG = """\
__enter__\\(\\)
body\\('var'\\)
__exit__\\(<class 'ValueError'>, Foo, <traceback object.*>\\)"""
class OpDefUtilTest(test_util.TensorFlowTestCase):
def testBasic(self):
cm = TestContextManager()
def body(var):
cm.log.append("body(%r)" % var)
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), NO_EXCEPTION_LOG)
def testBodyRaisesException(self):
cm = TestContextManager()
def body(var):
cm.log.append("body(%r)" % var)
raise ValueError("Foo")
with self.assertRaisesRegex(ValueError, "Foo"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertRegex("\n".join(cm.log), EXCEPTION_LOG)
def testEnterRaisesException(self):
cm = TestContextManager("raise_from_enter")
def body(var):
cm.log.append("body(%r)" % var)
with self.assertRaisesRegex(ValueError, "exception in __enter__"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), "__enter__()")
# Test behavior in unsupported case where __exit__ raises an exception.
def testExitRaisesException(self):
cm = TestContextManager("raise_from_exit")
def body(var):
cm.log.append("body(%r)" % var)
# Note: this does *not* raise an exception (but does log a warning):
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), NO_EXCEPTION_LOG)
# Test behavior in unsupported case where __exit__ suppresses exception.
def testExitSuppressesException(self):
cm = TestContextManager("suppress_exception")
def body(var):
cm.log.append("body(%r)" % var)
raise ValueError("Foo")
with self.assertRaisesRegex(
ValueError, "tensorflow::PyContextManager::Enter does not support "
"context managers that suppress exception"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertRegex("\n".join(cm.log), EXCEPTION_LOG)
if __name__ == "__main__":
googletest.main()
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework._py_context_manager."""
from tensorflow.python.framework import _py_context_manager
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class TestContextManager(object):
def __init__(self, behavior="basic"):
self.log = []
self.behavior = behavior
def __enter__(self):
self.log.append("__enter__()")
if self.behavior == "raise_from_enter":
raise ValueError("exception in __enter__")
return "var"
def __exit__(self, ex_type, ex_value, ex_tb):
self.log.append("__exit__(%s, %s, %s)" % (ex_type, ex_value, ex_tb))
if self.behavior == "raise_from_exit":
raise ValueError("exception in __exit__")
if self.behavior == "suppress_exception":
return True
# Expected log when the body doesn't raise an exception.
NO_EXCEPTION_LOG = """\
__enter__()
body('var')
__exit__(None, None, None)"""
# Expected log when the body does raise an exception. (Regular expression.)
EXCEPTION_LOG = """\
__enter__\\(\\)
body\\('var'\\)
__exit__\\(<class 'ValueError'>, Foo, <traceback object.*>\\)"""
class OpDefUtilTest(test_util.TensorFlowTestCase):
def testBasic(self):
cm = TestContextManager()
def body(var):
cm.log.append("body(%r)" % var)
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), NO_EXCEPTION_LOG)
def testBodyRaisesException(self):
cm = TestContextManager()
def body(var):
cm.log.append("body(%r)" % var)
raise ValueError("Foo")
with self.assertRaisesRegexp(ValueError, "Foo"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertRegex("\n".join(cm.log), EXCEPTION_LOG)
def testEnterRaisesException(self):
cm = TestContextManager("raise_from_enter")
def body(var):
cm.log.append("body(%r)" % var)
with self.assertRaisesRegexp(ValueError, "exception in __enter__"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), "__enter__()")
# Test behavior in unsupported case where __exit__ raises an exception.
def testExitRaisesException(self):
cm = TestContextManager("raise_from_exit")
def body(var):
cm.log.append("body(%r)" % var)
# Note: this does *not* raise an exception (but does log a warning):
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), NO_EXCEPTION_LOG)
# Test behavior in unsupported case where __exit__ suppresses exception.
def testExitSuppressesException(self):
cm = TestContextManager("suppress_exception")
def body(var):
cm.log.append("body(%r)" % var)
raise ValueError("Foo")
with self.assertRaisesRegexp(
ValueError, "tensorflow::PyContextManager::Enter does not support "
"context managers that suppress exception"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertRegex("\n".join(cm.log), EXCEPTION_LOG)
if __name__ == "__main__":
googletest.main()
|
import os
import shutil
import subprocess
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub', 'export', 'auth', 'cloud', 'ping'}
parser = get_main_parser()
if len(sys.argv) > 1:
from argparse import _StoreAction, _StoreTrueAction
from rich import box
from rich.table import Table
args, unknown = parser.parse_known_args()
if unknown:
from jina.helper import warn_unknown_args
unknown = list(filter(lambda x: x.startswith('--'), unknown))
warn_unknown_args(unknown)
if args.cli not in silent_print and print_args:
from jina.constants import __resources_path__
p = parser._actions[-1].choices[sys.argv[1]]
default_args = {
a.dest: a.default
for a in p._actions
if isinstance(a, (_StoreAction, _StoreTrueAction))
}
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
param_str = Table(
title=' '.join(sys.argv),
box=box.ROUNDED,
highlight=True,
title_justify='left',
)
param_str.add_column('Argument', justify='right')
param_str.add_column('Value', justify='left')
for k, v in sorted(vars(args).items()):
param = k.replace('_', '-')
value = str(v)
if not default_args.get(k, None) == v:
value = f'[b]{value}[/]'
param_str.add_row(param, value)
if 'JINA_LOG_NO_COLOR' not in os.environ:
print(f'\n{logo_str}\n')
console.print(param_str)
return args
else:
parser.print_help()
exit()
def _quick_ac_lookup():
from jina_cli.autocomplete import ac_table
if len(sys.argv) > 1:
if sys.argv[1] == 'commands':
for k in ac_table['commands']:
print(k)
exit()
elif sys.argv[1] == 'completions':
# search with the longest shared prefix
for j in range(len(sys.argv), 2, -1):
_input = ' '.join(sys.argv[2:j]).strip()
if _input in ac_table['completions']:
compl = ac_table['completions'][_input]
for k in compl:
if k not in sys.argv:
print(k)
break
exit()
def _try_plugin_command():
"""Tries to call the CLI of an external Jina project.
:return: if the plugin has been found (locally or among the known plugins)
"""
argv = sys.argv
if len(argv) < 2: # no command given
return False
from jina_cli.autocomplete import ac_table
if argv[1] in ac_table['commands']: # native command can't be plugin command
return False
def _cmd_exists(cmd):
return shutil.which(cmd) is not None
subcommand = argv[1]
cmd = 'jina-' + subcommand
if _cmd_exists(cmd):
subprocess.run([cmd] + argv[2:])
return True
from jina_cli.known_plugins import plugin_info
if subcommand in plugin_info:
from jina.helper import get_rich_console
cmd_info = plugin_info[subcommand]
project, package = cmd_info['display-name'], cmd_info['pip-package']
console = get_rich_console()
console.print(
f"It seems like [yellow]{project}[/yellow] is not installed in your environment."
f"To use it via the [green]'jina {subcommand}'[/green] command, "
f"install it first: [green]'pip install {package}'[/green]."
)
return True
return False
def main():
"""The main entrypoint of the CLI"""
found_plugin = _try_plugin_command()
if not found_plugin:
_quick_ac_lookup()
from jina_cli import api
args = _get_run_args()
getattr(api, args.cli.replace('-', '_'))(args)
|
import os
import shutil
import subprocess
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub', 'export', 'auth', 'cloud', 'ping'}
parser = get_main_parser()
if len(sys.argv) > 1:
from argparse import _StoreAction, _StoreTrueAction
from rich import box
from rich.table import Table
args, unknown = parser.parse_known_args()
if unknown:
from jina.helper import warn_unknown_args
unknown = list(filter(lambda x: x.startswith('--'), unknown))
warn_unknown_args(unknown)
if args.cli not in silent_print and print_args:
from jina import __resources_path__
p = parser._actions[-1].choices[sys.argv[1]]
default_args = {
a.dest: a.default
for a in p._actions
if isinstance(a, (_StoreAction, _StoreTrueAction))
}
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
param_str = Table(
title=' '.join(sys.argv),
box=box.ROUNDED,
highlight=True,
title_justify='left',
)
param_str.add_column('Argument', justify='right')
param_str.add_column('Value', justify='left')
for k, v in sorted(vars(args).items()):
param = k.replace('_', '-')
value = str(v)
if not default_args.get(k, None) == v:
value = f'[b]{value}[/]'
param_str.add_row(param, value)
if 'JINA_LOG_NO_COLOR' not in os.environ:
print(f'\n{logo_str}\n')
console.print(param_str)
return args
else:
parser.print_help()
exit()
def _quick_ac_lookup():
from jina_cli.autocomplete import ac_table
if len(sys.argv) > 1:
if sys.argv[1] == 'commands':
for k in ac_table['commands']:
print(k)
exit()
elif sys.argv[1] == 'completions':
# search with the longest shared prefix
for j in range(len(sys.argv), 2, -1):
_input = ' '.join(sys.argv[2:j]).strip()
if _input in ac_table['completions']:
compl = ac_table['completions'][_input]
for k in compl:
if k not in sys.argv:
print(k)
break
exit()
def _try_plugin_command():
"""Tries to call the CLI of an external Jina project.
:return: if the plugin has been found (locally or among the known plugins)
"""
argv = sys.argv
if len(argv) < 2: # no command given
return False
from jina_cli.autocomplete import ac_table
if argv[1] in ac_table['commands']: # native command can't be plugin command
return False
def _cmd_exists(cmd):
return shutil.which(cmd) is not None
subcommand = argv[1]
cmd = 'jina-' + subcommand
if _cmd_exists(cmd):
subprocess.run([cmd] + argv[2:])
return True
from jina_cli.known_plugins import plugin_info
if subcommand in plugin_info:
from jina.helper import get_rich_console
cmd_info = plugin_info[subcommand]
project, package = cmd_info['display-name'], cmd_info['pip-package']
console = get_rich_console()
console.print(
f"It seems like [yellow]{project}[/yellow] is not installed in your environment."
f"To use it via the [green]'jina {subcommand}'[/green] command, "
f"install it first: [green]'pip install {package}'[/green]."
)
return True
return False
def main():
"""The main entrypoint of the CLI"""
found_plugin = _try_plugin_command()
if not found_plugin:
_quick_ac_lookup()
from jina_cli import api
args = _get_run_args()
getattr(api, args.cli.replace('-', '_'))(args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
@auto_fp16()
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
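# Hedged usage sketch (assumes mmdet/mmcv matching the imports above): apply the
# shared stage-4 ResNet layer to RoI-pooled features.
if __name__ == '__main__':
    import torch
    head = ResLayer(depth=50, stage=3, stride=2)
    rois = torch.randn(8, 1024, 14, 14)  # 64 * 2**(3 - 1) * 4 input channels
    out = head(rois)
    print(out.shape)                     # torch.Size([8, 2048, 7, 7])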
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
@auto_fp16()
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Dataset preparation
# -------------------
#
# We start by uniformly generating 20 points in a 2D space.
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.decomposition import PCA
from sklearn.metrics import euclidean_distances
# Generate the data
EPSILON = np.finfo(np.float32).eps
n_samples = 20
rng = np.random.RandomState(seed=3)
X_true = rng.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
# %%
# Now we compute pairwise distances between all points and add
# a small amount of noise to the distance matrix. We make sure
# to keep the noisy distance matrix symmetric.
# Compute pairwise Euclidean distances
distances = euclidean_distances(X_true)
# Add noise to the distances
noise = rng.rand(n_samples, n_samples)
noise = noise + noise.T
np.fill_diagonal(noise, 0)
distances += noise
# %%
# Here we compute metric and non-metric MDS of the noisy distance matrix.
mds = manifold.MDS(
n_components=2,
max_iter=3000,
eps=1e-9,
n_init=1,
random_state=42,
dissimilarity="precomputed",
n_jobs=1,
)
X_mds = mds.fit(distances).embedding_
nmds = manifold.MDS(
n_components=2,
metric=False,
max_iter=3000,
eps=1e-12,
dissimilarity="precomputed",
random_state=42,
n_jobs=1,
n_init=1,
)
X_nmds = nmds.fit_transform(distances)
# %%
# Rescaling the non-metric MDS solution to match the spread of the original data.
X_nmds *= np.sqrt((X_true**2).sum()) / np.sqrt((X_nmds**2).sum())
# %%
# To make the visual comparisons easier, we rotate the original data and both MDS
# solutions to their PCA axes, and flip the horizontal and vertical MDS axes, if
# needed, to match the original data orientation.
# Rotate the data
pca = PCA(n_components=2)
X_true = pca.fit_transform(X_true)
X_mds = pca.fit_transform(X_mds)
X_nmds = pca.fit_transform(X_nmds)
# Align the sign of PCs
for i in [0, 1]:
if np.corrcoef(X_mds[:, i], X_true[:, i])[0, 1] < 0:
X_mds[:, i] *= -1
if np.corrcoef(X_nmds[:, i], X_true[:, i])[0, 1] < 0:
X_nmds[:, i] *= -1
# %%
# Finally, we plot the original data and both MDS reconstructions.
fig = plt.figure(1)
ax = plt.axes([0.0, 0.0, 1.0, 1.0])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color="navy", s=s, lw=0, label="True Position")
plt.scatter(X_mds[:, 0], X_mds[:, 1], color="turquoise", s=s, lw=0, label="MDS")
plt.scatter(X_nmds[:, 0], X_nmds[:, 1], color="darkorange", s=s, lw=0, label="NMDS")
plt.legend(scatterpoints=1, loc="best", shadow=False)
# Plot the edges
start_idx, end_idx = X_mds.nonzero()
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [
[X_true[i, :], X_true[j, :]] for i in range(len(X_true)) for j in range(len(X_true))
]
edges = distances.max() / (distances + EPSILON) * 100
np.fill_diagonal(edges, 0)
edges = np.abs(edges)
lc = LineCollection(
segments, zorder=0, cmap=plt.cm.Blues, norm=plt.Normalize(0, edges.max())
)
lc.set_array(edges.flatten())
lc.set_linewidths(np.full(len(segments), 0.5))
ax.add_collection(lc)
plt.show()
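# %%
# Hedged aside (not part of the original example): the fitted estimators expose
# the final value of the objective, ``stress_``, which gives a rough measure of
# how faithfully each embedding reproduces the noisy distances.
print(f"Metric MDS stress: {mds.stress_:.2f}")
print(f"Non-metric MDS stress: {nmds.stress_:.2f}")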
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.decomposition import PCA
from sklearn.metrics import euclidean_distances
# Generate the data
EPSILON = np.finfo(np.float32).eps
n_samples = 20
rng = np.random.RandomState(seed=3)
X_true = rng.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
# Compute pairwise Euclidean distances
distances = euclidean_distances(X_true)
# Add noise to the distances
noise = rng.rand(n_samples, n_samples)
noise = noise + noise.T
np.fill_diagonal(noise, 0)
distances += noise
mds = manifold.MDS(
n_components=2,
max_iter=3000,
eps=1e-9,
random_state=42,
dissimilarity="precomputed",
n_jobs=1,
)
X_mds = mds.fit(distances).embedding_
nmds = manifold.MDS(
n_components=2,
metric=False,
max_iter=3000,
eps=1e-12,
dissimilarity="precomputed",
random_state=42,
n_jobs=1,
n_init=1,
)
X_nmds = nmds.fit_transform(distances)
# Rescale the data
X_mds *= np.sqrt((X_true**2).sum()) / np.sqrt((X_mds**2).sum())
X_nmds *= np.sqrt((X_true**2).sum()) / np.sqrt((X_nmds**2).sum())
# Rotate the data
pca = PCA(n_components=2)
X_true = pca.fit_transform(X_true)
X_mds = pca.fit_transform(X_mds)
X_nmds = pca.fit_transform(X_nmds)
# Align the sign of PCs
for i in [0, 1]:
if np.corrcoef(X_mds[:, i], X_true[:, i])[0, 1] < 0:
X_mds[:, i] *= -1
if np.corrcoef(X_nmds[:, i], X_true[:, i])[0, 1] < 0:
X_nmds[:, i] *= -1
fig = plt.figure(1)
ax = plt.axes([0.0, 0.0, 1.0, 1.0])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color="navy", s=s, lw=0, label="True Position")
plt.scatter(X_mds[:, 0], X_mds[:, 1], color="turquoise", s=s, lw=0, label="MDS")
plt.scatter(X_nmds[:, 0], X_nmds[:, 1], color="darkorange", s=s, lw=0, label="NMDS")
plt.legend(scatterpoints=1, loc="best", shadow=False)
# Plot the edges
start_idx, end_idx = X_mds.nonzero()
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [
[X_true[i, :], X_true[j, :]] for i in range(len(X_true)) for j in range(len(X_true))
]
edges = distances.max() / (distances + EPSILON) * 100
np.fill_diagonal(edges, 0)
edges = np.abs(edges)
lc = LineCollection(
segments, zorder=0, cmap=plt.cm.Blues, norm=plt.Normalize(0, edges.max())
)
lc.set_array(edges.flatten())
lc.set_linewidths(np.full(len(segments), 0.5))
ax.add_collection(lc)
plt.show()
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
import torchvision.tv_tensors
return torchvision.transforms.v2, torchvision.tv_tensors
else:
return reference_transforms, None
class DetectionPresetTrain:
# Note: this transform assumes that the input to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, tv_tensors = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "tv_tensor":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {tv_tensors.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [T.ToDtype(torch.float, scale=True)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(tv_tensors.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
T.ToPureTensor(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "tv_tensor":
transforms += [T.ToImage()]
else:
raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")
transforms += [T.ToDtype(torch.float, scale=True)]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
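# Hedged usage sketch (assumes the torchvision v2 stack referenced above and
# that this file's sibling ``transforms`` reference module is importable): run
# one sample through the "hflip" training preset built on v2 transforms.
if __name__ == "__main__":
    from PIL import Image
    from torchvision import tv_tensors
    preset = DetectionPresetTrain(data_augmentation="hflip", use_v2=True)
    img = Image.new("RGB", (640, 480))
    target = {
        "boxes": tv_tensors.BoundingBoxes(
            torch.tensor([[10.0, 10.0, 100.0, 100.0]]),
            format="XYXY",
            canvas_size=(480, 640),
        ),
        "labels": torch.tensor([1]),
    }
    img_t, target_t = preset(img, target)
    print(img_t.shape, target_t["boxes"].shape)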
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
# Note: this transform assumes that the input to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [T.ToDtype(torch.float, scale=True)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
T.ToPureTensor(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ToDtype(torch.float, scale=True)]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
"""Dict prompt template."""
import warnings
from functools import cached_property
from typing import Any, Literal, Optional
from typing_extensions import override
from langchain_core.load import dumpd
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
get_template_variables,
)
from langchain_core.runnables import RunnableConfig, RunnableSerializable
from langchain_core.runnables.config import ensure_config
class DictPromptTemplate(RunnableSerializable[dict, dict]):
"""Template represented by a dict.
Recognizes variables in f-string or mustache formatted string dict values. Does NOT
recognize variables in dict keys. Applies recursively.
"""
template: dict[str, Any]
template_format: Literal["f-string", "mustache"]
@property
def input_variables(self) -> list[str]:
"""Template input variables."""
return _get_input_variables(self.template, self.template_format)
def format(self, **kwargs: Any) -> dict[str, Any]:
"""Format the prompt with the inputs."""
return _insert_input_variables(self.template, kwargs, self.template_format)
async def aformat(self, **kwargs: Any) -> dict[str, Any]:
"""Format the prompt with the inputs."""
return self.format(**kwargs)
@override
def invoke(
self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> dict:
"""Invoke the prompt."""
return self._call_with_config(
lambda x: self.format(**x),
input,
ensure_config(config),
run_type="prompt",
serialized=self._serialized,
**kwargs,
)
@property
def _prompt_type(self) -> str:
return "dict-prompt"
@cached_property
def _serialized(self) -> dict[str, Any]:
return dumpd(self)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable.
Returns: True.
"""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Serialization namespace."""
return ["langchain_core", "prompts", "dict"]
def pretty_repr(self, *, html: bool = False) -> str:
"""Human-readable representation.
Args:
html: Whether to format as HTML. Defaults to False.
Returns:
Human-readable representation.
"""
raise NotImplementedError
def _get_input_variables(
template: dict, template_format: Literal["f-string", "mustache"]
) -> list[str]:
input_variables = []
for v in template.values():
if isinstance(v, str):
input_variables += get_template_variables(v, template_format)
elif isinstance(v, dict):
input_variables += _get_input_variables(v, template_format)
elif isinstance(v, (list, tuple)):
for x in v:
if isinstance(x, str):
input_variables += get_template_variables(x, template_format)
elif isinstance(x, dict):
input_variables += _get_input_variables(x, template_format)
return list(set(input_variables))
def _insert_input_variables(
template: dict[str, Any],
inputs: dict[str, Any],
template_format: Literal["f-string", "mustache"],
) -> dict[str, Any]:
formatted = {}
formatter = DEFAULT_FORMATTER_MAPPING[template_format]
for k, v in template.items():
if isinstance(v, str):
formatted[k] = formatter(v, **inputs)
elif isinstance(v, dict):
if k == "image_url" and "path" in v:
msg = (
"Specifying image inputs via file path in environments with "
"user-input paths is a security vulnerability. Out of an abundance "
"of caution, the utility has been removed to prevent possible "
"misuse."
)
warnings.warn(msg, stacklevel=2)
formatted[k] = _insert_input_variables(v, inputs, template_format)
elif isinstance(v, (list, tuple)):
formatted_v = []
for x in v:
if isinstance(x, str):
formatted_v.append(formatter(x, **inputs))
elif isinstance(x, dict):
formatted_v.append(
_insert_input_variables(x, inputs, template_format)
)
formatted[k] = type(v)(formatted_v)
else:
formatted[k] = v
return formatted
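# Hedged usage sketch: f-string variables nested anywhere in the dict values are
# collected as input variables and substituted recursively.
if __name__ == "__main__":
    prompt = DictPromptTemplate(
        template={"type": "text", "text": "Tell me a {adjective} joke about {topic}."},
        template_format="f-string",
    )
    print(sorted(prompt.input_variables))  # ['adjective', 'topic']
    print(prompt.format(adjective="funny", topic="cats"))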
|
"""Dict prompt template."""
import warnings
from functools import cached_property
from typing import Any, Literal, Optional
from typing_extensions import override
from langchain_core.load import dumpd
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
get_template_variables,
)
from langchain_core.runnables import RunnableConfig, RunnableSerializable
from langchain_core.runnables.config import ensure_config
class DictPromptTemplate(RunnableSerializable[dict, dict]):
"""Template represented by a dict.
Recognizes variables in f-string or mustache formatted string dict values. Does NOT
recognize variables in dict keys. Applies recursively.
"""
template: dict[str, Any]
template_format: Literal["f-string", "mustache"]
@property
def input_variables(self) -> list[str]:
"""Template input variables."""
return _get_input_variables(self.template, self.template_format)
def format(self, **kwargs: Any) -> dict[str, Any]:
"""Format the prompt with the inputs."""
return _insert_input_variables(self.template, kwargs, self.template_format)
async def aformat(self, **kwargs: Any) -> dict[str, Any]:
"""Format the prompt with the inputs."""
return self.format(**kwargs)
@override
def invoke(
self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> dict:
"""Invoke the prompt."""
return self._call_with_config(
lambda x: self.format(**x),
input,
ensure_config(config),
run_type="prompt",
serialized=self._serialized,
**kwargs,
)
@property
def _prompt_type(self) -> str:
return "dict-prompt"
@cached_property
def _serialized(self) -> dict[str, Any]:
return dumpd(self)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable.
Returns: True.
"""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Serialization namespace."""
return ["langchain_core", "prompts", "dict"]
def pretty_repr(self, *, html: bool = False) -> str:
"""Human-readable representation.
Args:
html: Whether to format as HTML. Defaults to False.
Returns:
Human-readable representation.
"""
raise NotImplementedError
def _get_input_variables(
template: dict, template_format: Literal["f-string", "mustache"]
) -> list[str]:
input_variables = []
for v in template.values():
if isinstance(v, str):
input_variables += get_template_variables(v, template_format)
elif isinstance(v, dict):
input_variables += _get_input_variables(v, template_format)
elif isinstance(v, (list, tuple)):
for x in v:
if isinstance(x, str):
input_variables += get_template_variables(x, template_format)
elif isinstance(x, dict):
input_variables += _get_input_variables(x, template_format)
else:
pass
return list(set(input_variables))
def _insert_input_variables(
template: dict[str, Any],
inputs: dict[str, Any],
template_format: Literal["f-string", "mustache"],
) -> dict[str, Any]:
formatted = {}
formatter = DEFAULT_FORMATTER_MAPPING[template_format]
for k, v in template.items():
if isinstance(v, str):
formatted[k] = formatter(v, **inputs)
elif isinstance(v, dict):
if k == "image_url" and "path" in v:
msg = (
"Specifying image inputs via file path in environments with "
"user-input paths is a security vulnerability. Out of an abundance "
"of caution, the utility has been removed to prevent possible "
"misuse."
)
warnings.warn(msg, stacklevel=2)
formatted[k] = _insert_input_variables(v, inputs, template_format)
elif isinstance(v, (list, tuple)):
formatted_v = []
for x in v:
if isinstance(x, str):
formatted_v.append(formatter(x, **inputs))
elif isinstance(x, dict):
formatted_v.append(
_insert_input_variables(x, inputs, template_format)
)
formatted[k] = type(v)(formatted_v)
else:
formatted[k] = v
return formatted
|
from jina import Document, Flow
from sentencizer import Sentencizer
def test_exec():
f = Flow().add(uses=Sentencizer)
with f:
resp = f.post(
on='/test',
inputs=Document(text='Hello. World! Go? Back'),
return_results=True,
)
assert resp[0].docs[0].chunks[0].text == 'Hello.'
assert resp[0].docs[0].chunks[1].text == 'World!'
assert resp[0].docs[0].chunks[2].text == 'Go?'
assert resp[0].docs[0].chunks[3].text == 'Back'
|
from jina import Document, Flow
from ...sentencizer import Sentencizer
def test_exec():
f = Flow().add(uses=Sentencizer)
with f:
resp = f.post(
on='/test',
inputs=Document(text='Hello. World! Go? Back'),
return_results=True,
)
assert resp[0].docs[0].chunks[0].text == 'Hello.'
assert resp[0].docs[0].chunks[1].text == 'World!'
assert resp[0].docs[0].chunks[2].text == 'Go?'
assert resp[0].docs[0].chunks[3].text == 'Back'
|
from typing import Callable, TypeVar, ParamSpec
import threading
P = ParamSpec("P")
R = TypeVar("R")
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
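# Hedged usage sketch: results are cached per thread and per argument tuple, so
# the decorated function body runs at most once for a given call signature in
# each thread.
if __name__ == "__main__":
    import time

    @thread_cached
    def slow_square(x: int) -> int:
        time.sleep(0.1)   # stand-in for an expensive computation
        return x * x

    print(slow_square(4))  # computed
    print(slow_square(4))  # served from the per-thread cache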
|
import threading
from functools import wraps
from typing import Callable, ParamSpec, TypeVar
T = TypeVar("T")
P = ParamSpec("P")
R = TypeVar("R")
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
@wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
def thread_cached_property(func: Callable[[T], R]) -> property:
return property(thread_cached(func))
|
"""Code Interpreter tool spec."""
import subprocess
import sys
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class CodeInterpreterToolSpec(BaseToolSpec):
"""
Code Interpreter tool spec.
WARNING: This tool provides the Agent access to the `subprocess.run` command.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended for use in a production setting and would require heavy sandboxing or virtual machines.
"""
spec_functions = ["code_interpreter"]
def code_interpreter(self, code: str):
"""
A function to execute python code, and return the stdout and stderr.
You should import any libraries that you wish to use. You have access to any libraries the user has installed.
The code passed to this function is executed in isolation. It should be complete at the time it is passed to this function.
You should interpret the output and errors returned from this function, and attempt to fix any problems.
If you cannot fix the error, show the code to the user and ask for help.
It is not possible to return graphics or other complicated data from this function. If the user cannot see the output, save it to a file and tell the user.
"""
result = subprocess.run([sys.executable, "-c", code], capture_output=True)
return f"StdOut:\n{result.stdout}\nStdErr:\n{result.stderr}"
|
"""Code Interpreter tool spec."""
import subprocess
import sys
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class CodeInterpreterToolSpec(BaseToolSpec):
"""Code Interpreter tool spec.
WARNING: This tool provides the Agent access to the `subprocess.run` command.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended for use in a production setting and would require heavy sandboxing or virtual machines.
"""
spec_functions = ["code_interpreter"]
def code_interpreter(self, code: str):
"""
A function to execute python code, and return the stdout and stderr.
You should import any libraries that you wish to use. You have access to any libraries the user has installed.
The code passed to this function is executed in isolation. It should be complete at the time it is passed to this function.
You should interpret the output and errors returned from this function, and attempt to fix any problems.
If you cannot fix the error, show the code to the user and ask for help.
It is not possible to return graphics or other complicated data from this function. If the user cannot see the output, save it to a file and tell the user.
"""
result = subprocess.run([sys.executable, "-c", code], capture_output=True)
return f"StdOut:\n{result.stdout}\nStdErr:\n{result.stderr}"
|
import warnings
from typing import List, Optional, Type
from jina.excepts import BadYAMLVersion
from jina.jaml import JAMLCompatible
from jina.jaml.parsers.base import VersionedYAMLParser
from jina.serve.gateway import BaseGateway
def _get_all_parser(cls: Type['JAMLCompatible']):
"""Get all parsers and legacy parser of a class
:param cls: target class
:return: a tuple of two elements; first is a list of all parsers, second is the legacy parser for default fallback
"""
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
if issubclass(cls, Flow):
return _get_flow_parser()
elif issubclass(cls, BaseExecutor):
return _get_exec_parser()
elif issubclass(cls, BaseGateway):
return _get_gateway_parser()
else:
return _get_default_parser()
def _get_flow_parser():
from jina.jaml.parsers.flow.v1 import V1Parser
return [V1Parser], V1Parser
def _get_exec_parser():
from jina.jaml.parsers.executor.legacy import ExecutorLegacyParser
return [ExecutorLegacyParser], ExecutorLegacyParser
def _get_gateway_parser():
from jina.jaml.parsers.gateway.legacy import GatewayLegacyParser
return [GatewayLegacyParser], GatewayLegacyParser
def _get_default_parser():
from jina.jaml.parsers.default.v1 import V1Parser
return [V1Parser], V1Parser
def get_parser(
cls: Type['JAMLCompatible'], version: Optional[str]
) -> 'VersionedYAMLParser':
"""
.. # noqa: DAR401
:param cls: the target class to parse
:param version: yaml version number in "MAJOR[.MINOR]" format
:return: parser given the YAML version
"""
all_parsers, legacy_parser = _get_all_parser(cls)
if version:
if isinstance(version, (float, int)):
version = str(version)
for p in all_parsers:
if p.version == version:
return p()
for p in all_parsers:
# fallback to major
if version.split('.')[0] == p.version:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to parser for version: {p.version}',
UserWarning,
)
return p()
raise BadYAMLVersion(f'{version} is not a valid version number')
else:
if version is not None:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to legacy parser. '
f'this usually means you are using a deprecated YAML format.',
DeprecationWarning,
)
# fallback to legacy parser
return legacy_parser()
def get_supported_versions(cls) -> List[str]:
"""List all supported versions
:param cls: the class to check
:return: supported versions sorted alphabetically
"""
all_parsers, _ = _get_all_parser(cls)
return list(sorted(p.version for p in all_parsers))
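# Hedged usage sketch (assumes a Jina install matching the imports above):
# resolve the YAML parser for a Flow definition.
if __name__ == '__main__':
    from jina.orchestrate.flow.base import Flow
    print(get_supported_versions(Flow))                  # e.g. ['1']
    print(type(get_parser(Flow, version='1')).__name__)  # e.g. V1Parser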
|
import warnings
from typing import List, Optional, Type
from jina.excepts import BadYAMLVersion
from jina.jaml import JAMLCompatible
from jina.jaml.parsers.base import VersionedYAMLParser
def _get_all_parser(cls: Type['JAMLCompatible']):
"""Get all parsers and legacy parser of a class
:param cls: target class
:return: a tuple of two elements; first is a list of all parsers, second is the legacy parser for default fallback
"""
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
if issubclass(cls, Flow):
return _get_flow_parser()
elif issubclass(cls, BaseExecutor):
return _get_exec_parser()
else:
return _get_default_parser()
def _get_flow_parser():
from jina.jaml.parsers.flow.v1 import V1Parser
return [V1Parser], V1Parser
def _get_exec_parser():
from jina.jaml.parsers.executor.legacy import LegacyParser
return [LegacyParser], LegacyParser
def _get_default_parser():
from jina.jaml.parsers.default.v1 import V1Parser
return [V1Parser], V1Parser
def get_parser(
cls: Type['JAMLCompatible'], version: Optional[str]
) -> 'VersionedYAMLParser':
"""
.. # noqa: DAR401
:param cls: the target class to parse
:param version: yaml version number in "MAJOR[.MINOR]" format
:return: parser given the YAML version
"""
all_parsers, legacy_parser = _get_all_parser(cls)
if version:
if isinstance(version, (float, int)):
version = str(version)
for p in all_parsers:
if p.version == version:
return p()
for p in all_parsers:
# fallback to major
if version.split('.')[0] == p.version:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to parser for version: {p.version}',
UserWarning,
)
return p()
raise BadYAMLVersion(f'{version} is not a valid version number')
else:
if version is not None:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to legacy parser. '
f'this usually means you are using a deprecated YAML format.',
DeprecationWarning,
)
# fallback to legacy parser
return legacy_parser()
def get_supported_versions(cls) -> List[str]:
"""List all supported versions
:param cls: the class to check
:return: supported versions sorted alphabetically
"""
all_parsers, _ = _get_all_parser(cls)
return list(sorted(p.version for p in all_parsers))
|
_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""Standard LangChain interface tests."""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests.chat_models import (
ChatModelUnitTests,
)
from langchain_groq import ChatGroq
class TestGroqStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatGroq
@property
def chat_model_params(self) -> dict:
return {"model": "llama-3.1-8b-instant"}
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests.chat_models import (
ChatModelUnitTests,
)
from langchain_groq import ChatGroq
class TestGroqStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatGroq
@property
def chat_model_params(self) -> dict:
return {"model": "llama-3.1-8b-instant"}
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import HybridTaskCascadeRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init htc RoI head."""
# Normal HTC RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
assert roi_head.with_semantic
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_htc_roi_head_loss(self, cfg_file):
"""Tests htc roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
        # When truth is non-empty, the cls, box, and mask losses
        # should all be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
with_semantic=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
with_semantic=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_htc_roi_head_predict(self, cfg_file):
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
results = roi_head.predict(
feats, proposal_list, batch_data_samples, rescale=True)
self.assertEqual(results[0].masks.shape[-2:], (s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import HybridTaskCascadeRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init htc RoI head."""
# Normal HTC RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
assert roi_head.with_semantic
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_htc_roi_head_loss(self, cfg_file):
"""Tests htc roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
        # When truth is non-empty, the cls, box, and mask losses
        # should all be nonzero for random inputs
img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[0],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
@parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
def test_htc_roi_head_predict(self, cfg_file):
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
results = roi_head.predict(
feats, proposal_list, batch_data_samples, rescale=True)
self.assertEqual(results[0].masks.shape[-2:], (s, s))
|
import json
from jina.orchestrate.flow.base import Flow
from jina.orchestrate.deployments import Deployment
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_kubernetes_yaml(
output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes')
def export_docker_compose(args):
"""Export to Docker compose yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_docker_compose_yaml(
output_path=args.outpath, network_name=args.network_name
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Docker Compose')
def export_flowchart(args):
"""Export to flowchart file
:param args: args from CLI
"""
Flow.load_config(args.config_path).plot(
args.outpath, vertical_layout=args.vertical_layout
)
def export_schema(args):
"""Export to JSON Schemas
:param args: args from CLI
"""
from jina import __version__
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf-8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf-8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf-8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
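# Hedged usage sketch (illustrative only): these helpers are normally driven
# by the `jina export ...` CLI, but they only need an argparse-style object
# with the right attributes, so they can also be called programmatically.
# The config path below is an assumption and must point to an existing
# Flow or Deployment YAML.
if __name__ == '__main__':
    from types import SimpleNamespace

    export_kubernetes(
        SimpleNamespace(
            config_path='flow.yml',
            outpath='./k8s',
            k8s_namespace='demo',
        )
    )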
|
import json
from jina.orchestrate.flow.base import Flow
from jina.orchestrate.deployments import Deployment
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_kubernetes_yaml(
output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes')
def export_docker_compose(args):
"""Export to Docker compose yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_docker_compose_yaml(
output_path=args.outpath, network_name=args.network_name
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Docker Compose')
def export_flowchart(args):
"""Export to flowchart file
:param args: args from CLI
"""
Flow.load_config(args.config_path).plot(
args.outpath, vertical_layout=args.vertical_layout
)
def export_schema(args):
"""Export to JSON Schemas
:param args: args from CLI
"""
from jina import __version__
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .fpn_dropblock import FPN_DropBlock
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .ssh import SSH
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead', 'CSPNeXtPAFPN', 'SSH',
'FPN_DropBlock'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .ssh import SSH
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead', 'CSPNeXtPAFPN', 'SSH'
]
|
"""langchain-core version information and utilities."""
VERSION = "0.3.53"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.52"
|
import os
import pathlib
from typing import Any, Callable, Optional, Union
from .folder import default_loader
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``.
.. note::
The partition only changes which split each image belongs to. Thus, regardless of the selected
partition, combining all splits will result in all images.
        transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depending on the given loader,
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
_MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1"
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
partition: int = 1,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[Union[str, pathlib.Path]], Any] = default_loader,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        if not isinstance(partition, int) or not (1 <= partition <= 10):
raise ValueError(
f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, "
f"but got {partition} instead"
)
self._partition = partition
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / type(self).__name__.lower()
self._data_folder = self._base_folder / "dtd"
self._meta_folder = self._data_folder / "labels"
self._images_folder = self._data_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._image_files = []
classes = []
with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file:
for line in file:
cls, name = line.strip().split("/")
self._image_files.append(self._images_folder.joinpath(cls, name))
classes.append(cls)
self.classes = sorted(set(classes))
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._labels = [self.class_to_idx[cls] for cls in classes]
self.loader = loader
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}, partition={self._partition}"
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder)
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5)
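# Hedged usage sketch (illustrative only; the root path and transform are
# assumptions, not part of the dataset class above). Downloads the DTD
# archive on first use.
if __name__ == "__main__":
    from torchvision import transforms

    dtd = DTD(
        root="./data",
        split="val",
        partition=1,
        transform=transforms.ToTensor(),
        download=True,
    )
    image, label = dtd[0]
    print(len(dtd), dtd.classes[label], image.shape)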
|
import os
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``.
.. note::
The partition only changes which split each image belongs to. Thus, regardless of the selected
partition, combining all splits will result in all images.
        transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depending on the given loader,
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
_MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1"
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
partition: int = 1,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[Union[str, pathlib.Path]], Any] = default_loader,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        if not isinstance(partition, int) or not (1 <= partition <= 10):
raise ValueError(
f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, "
f"but got {partition} instead"
)
self._partition = partition
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / type(self).__name__.lower()
self._data_folder = self._base_folder / "dtd"
self._meta_folder = self._data_folder / "labels"
self._images_folder = self._data_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._image_files = []
classes = []
with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file:
for line in file:
cls, name = line.strip().split("/")
self._image_files.append(self._images_folder.joinpath(cls, name))
classes.append(cls)
self.classes = sorted(set(classes))
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._labels = [self.class_to_idx[cls] for cls in classes]
self.loader = loader
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}, partition={self._partition}"
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder)
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5)
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
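# Hedged usage sketch (illustrative only, not an additional test case):
# a StaticEmbedding module can be wrapped directly in a SentenceTransformer;
# the tokenizer and randomly initialised 64-dim weights mirror the fixtures above.
if __name__ == "__main__":
    tok = Tokenizer.from_pretrained("bert-base-uncased")
    st_model = SentenceTransformer(modules=[StaticEmbedding(tok, embedding_dim=64)])
    embeddings = st_model.encode(["static embeddings are fast"])
    print(embeddings.shape)  # expected: (1, 64)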
|
from __future__ import annotations
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region-based
Object Detectors with Online Hard Example Mining
<https://arxiv.org/abs/1604.03540>`_.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
loss_key='loss_cls',
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.context = context
if not hasattr(self.context, 'num_stages'):
self.bbox_head = self.context.bbox_head
else:
self.bbox_head = self.context.bbox_head[self.context.current_stage]
self.loss_key = loss_key
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
if not hasattr(self.context, 'num_stages'):
bbox_results = self.context._bbox_forward(feats, rois)
else:
bbox_results = self.context._bbox_forward(
self.context.current_stage, feats, rois)
cls_score = bbox_results['cls_score']
loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=rois,
labels=labels,
label_weights=cls_score.new_ones(cls_score.size(0)),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')[self.loss_key]
_, topk_loss_inds = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected positive samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of positive samples
"""
# Sample some hard positive samples
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
assign_result.labels[pos_inds], feats)
def _sample_neg(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected negative samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of negative samples
"""
# Sample some hard negative samples
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
neg_labels = assign_result.labels.new_empty(
neg_inds.size(0)).fill_(self.bbox_head.num_classes)
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
neg_labels, feats)
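# Hedged usage sketch (illustrative only): in an MMDetection config this
# sampler is selected by name through the registry; `context` is injected by
# the RoI head at build time, so it is not written in the config. The numbers
# below are typical values, not taken from this file.
example_rcnn_train_cfg = dict(
    sampler=dict(
        type='OHEMSampler',
        num=512,
        pos_fraction=0.25,
        neg_pos_ub=-1,
        add_gt_as_proposals=True))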
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region-based
Object Detectors with Online Hard Example Mining
<https://arxiv.org/abs/1604.03540>`_.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.context = context
if not hasattr(self.context, 'num_stages'):
self.bbox_head = self.context.bbox_head
else:
self.bbox_head = self.context.bbox_head[self.context.current_stage]
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
if not hasattr(self.context, 'num_stages'):
bbox_results = self.context._bbox_forward(feats, rois)
else:
bbox_results = self.context._bbox_forward(
self.context.current_stage, feats, rois)
cls_score = bbox_results['cls_score']
loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=rois,
labels=labels,
label_weights=cls_score.new_ones(cls_score.size(0)),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')['loss_cls']
_, topk_loss_inds = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected positive samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of positive samples
"""
# Sample some hard positive samples
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
assign_result.labels[pos_inds], feats)
def _sample_neg(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected negative samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of negative samples
"""
# Sample some hard negative samples
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
neg_labels = assign_result.labels.new_empty(
neg_inds.size(0)).fill_(self.bbox_head.num_classes)
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
neg_labels, feats)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .profiler_hook import ProfilerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook', 'ProfilerHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook'
]
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
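# Hedged usage sketch (illustrative only, not part of the original module):
# round-trips a built-in activation through `get`, `serialize` and
# `deserialize`; built-ins serialize to their plain string name.
if __name__ == "__main__":
    fn = get("relu")            # resolve by name
    cfg = serialize(fn)         # -> "relu"
    same_fn = deserialize(cfg)  # back to the callable
    assert same_fn is relu
    print(fn.__name__, cfg)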
|
import types
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
elu,
selu,
softplus,
softsign,
silu,
gelu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import os
import urllib.parse
from typing import Dict, Union, Optional
from llama_index.core.base.llms.generic_utils import (
get_from_param_or_env,
)
# Import SecretStr directly from pydantic
# since there is not one in llama_index.core.bridge.pydantic
from pydantic import SecretStr
def resolve_watsonx_credentials(
*,
url: Optional[str] = None,
apikey: Optional[str] = None,
token: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
instance_id: Optional[str] = None,
) -> Dict[str, SecretStr]:
"""
    Resolve watsonx.ai credentials. If the value of a given param is None,
    try to find the corresponding environment variable.
    :raises ValueError: raised when the value of a required attribute is not found
:return: Dictionary with resolved credentials items
:rtype: Dict[str, SecretStr]
"""
creds = {}
creds["url"] = convert_to_secret_str(
get_from_param_or_env("url", url, "WATSONX_URL")
)
parsed_url = urllib.parse.urlparse(creds["url"].get_secret_value())
if parsed_url.netloc.endswith(".cloud.ibm.com"):
if not (apikey or "WATSONX_APIKEY" in os.environ) and not (
token or "WATSONX_TOKEN" in os.environ
):
raise ValueError(
"Did not find 'apikey' or 'token',"
" please add an environment variable"
" `WATSONX_APIKEY` or 'WATSONX_TOKEN' "
"which contains it,"
" or pass 'apikey' or 'token'"
" as a named parameter."
)
elif apikey or "WATSONX_APIKEY" in os.environ:
creds["apikey"] = convert_to_secret_str(
get_from_param_or_env("apikey", apikey, "WATSONX_APIKEY")
)
else:
creds["token"] = convert_to_secret_str(
get_from_param_or_env("token", token, "WATSONX_TOKEN")
)
else:
if (
not token
and "WATSONX_TOKEN" not in os.environ
and not password
and "WATSONX_PASSWORD" not in os.environ
and not apikey
and "WATSONX_APIKEY" not in os.environ
):
raise ValueError(
"Did not find 'token', 'password' or 'apikey',"
" please add an environment variable"
" `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' "
"which contains it,"
" or pass 'token', 'password' or 'apikey'"
" as a named parameter."
)
elif token or "WATSONX_TOKEN" in os.environ:
creds["token"] = convert_to_secret_str(
get_from_param_or_env("token", token, "WATSONX_TOKEN")
)
elif password or "WATSONX_PASSWORD" in os.environ:
creds["password"] = convert_to_secret_str(
get_from_param_or_env("password", password, "WATSONX_PASSWORD")
)
creds["username"] = convert_to_secret_str(
get_from_param_or_env("username", username, "WATSONX_USERNAME")
)
elif apikey or "WATSONX_APIKEY" in os.environ:
creds["apikey"] = convert_to_secret_str(
get_from_param_or_env("apikey", apikey, "WATSONX_APIKEY")
)
creds["username"] = convert_to_secret_str(
get_from_param_or_env("username", username, "WATSONX_USERNAME")
)
if not instance_id or "WATSONX_INSTANCE_ID" not in os.environ:
creds["instance_id"] = convert_to_secret_str(
get_from_param_or_env("instance_id", instance_id, "WATSONX_INSTANCE_ID")
)
return creds
def convert_to_secret_str(value: Union[SecretStr, str]) -> SecretStr:
"""Convert a string to a SecretStr."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
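# Hedged usage sketch (illustrative only; the URL, username, password and
# instance id below are placeholders for an on-prem Cloud Pak for Data setup,
# not real credentials).
if __name__ == "__main__":
    creds = resolve_watsonx_credentials(
        url="https://cpd.example.com",
        username="admin",
        password="not-a-real-password",
        instance_id="openshift",
    )
    # Values come back wrapped in SecretStr so they are not echoed in logs.
    print(sorted(creds))  # typically ['instance_id', 'password', 'url', 'username']
    print(creds["url"].get_secret_value())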
|
import os
import urllib.parse
from typing import Dict, Union, Optional
from llama_index.core.base.llms.generic_utils import (
get_from_param_or_env,
)
# Import SecretStr directly from pydantic
# since there is not one in llama_index.core.bridge.pydantic
from pydantic import SecretStr
def resolve_watsonx_credentials(
*,
url: Optional[str] = None,
apikey: Optional[str] = None,
token: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
instance_id: Optional[str] = None
) -> Dict[str, SecretStr]:
"""
    Resolve watsonx.ai credentials. If the value of a given param is None,
    try to find the corresponding environment variable.
    :raises ValueError: raised when the value of a required attribute is not found
:return: Dictionary with resolved credentials items
:rtype: Dict[str, SecretStr]
"""
creds = {}
creds["url"] = convert_to_secret_str(
get_from_param_or_env("url", url, "WATSONX_URL")
)
parsed_url = urllib.parse.urlparse(creds["url"].get_secret_value())
if parsed_url.netloc.endswith(".cloud.ibm.com"):
if not (apikey or "WATSONX_APIKEY" in os.environ) and not (
token or "WATSONX_TOKEN" in os.environ
):
raise ValueError(
"Did not find 'apikey' or 'token',"
" please add an environment variable"
" `WATSONX_APIKEY` or 'WATSONX_TOKEN' "
"which contains it,"
" or pass 'apikey' or 'token'"
" as a named parameter."
)
elif apikey or "WATSONX_APIKEY" in os.environ:
creds["apikey"] = convert_to_secret_str(
get_from_param_or_env("apikey", apikey, "WATSONX_APIKEY")
)
else:
creds["token"] = convert_to_secret_str(
get_from_param_or_env("token", token, "WATSONX_TOKEN")
)
else:
if (
not token
and "WATSONX_TOKEN" not in os.environ
and not password
and "WATSONX_PASSWORD" not in os.environ
and not apikey
and "WATSONX_APIKEY" not in os.environ
):
raise ValueError(
"Did not find 'token', 'password' or 'apikey',"
" please add an environment variable"
" `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' "
"which contains it,"
" or pass 'token', 'password' or 'apikey'"
" as a named parameter."
)
elif token or "WATSONX_TOKEN" in os.environ:
creds["token"] = convert_to_secret_str(
get_from_param_or_env("token", token, "WATSONX_TOKEN")
)
elif password or "WATSONX_PASSWORD" in os.environ:
creds["password"] = convert_to_secret_str(
get_from_param_or_env("password", password, "WATSONX_PASSWORD")
)
creds["username"] = convert_to_secret_str(
get_from_param_or_env("username", username, "WATSONX_USERNAME")
)
elif apikey or "WATSONX_APIKEY" in os.environ:
creds["apikey"] = convert_to_secret_str(
get_from_param_or_env("apikey", apikey, "WATSONX_APIKEY")
)
creds["username"] = convert_to_secret_str(
get_from_param_or_env("username", username, "WATSONX_USERNAME")
)
if not instance_id or "WATSONX_INSTANCE_ID" not in os.environ:
creds["instance_id"] = convert_to_secret_str(
get_from_param_or_env("instance_id", instance_id, "WATSONX_INSTANCE_ID")
)
return creds
def convert_to_secret_str(value: Union[SecretStr, str]) -> SecretStr:
"""Convert a string to a SecretStr."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
|
"""Test formatting functionality."""
from typing import Union
import pytest
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.documents import Document
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, Generation
from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
from pydantic import RootModel, ValidationError
@pytest.mark.xfail(reason="TODO: FIX BEFORE 0.3 RELEASE")
def test_serialization_of_wellknown_objects() -> None:
"""Test that pydantic is able to serialize and deserialize well known objects."""
well_known_lc_object = RootModel[
Union[
Document,
HumanMessage,
SystemMessage,
ChatMessage,
FunctionMessage,
FunctionMessageChunk,
AIMessage,
HumanMessageChunk,
SystemMessageChunk,
ChatMessageChunk,
AIMessageChunk,
StringPromptValue,
ChatPromptValueConcrete,
AgentFinish,
AgentAction,
AgentActionMessageLog,
ChatGeneration,
Generation,
ChatGenerationChunk,
]
]
lc_objects = [
HumanMessage(content="human"),
HumanMessageChunk(content="human"),
AIMessage(content="ai"),
AIMessageChunk(content="ai"),
SystemMessage(content="sys"),
SystemMessageChunk(content="sys"),
FunctionMessage(
name="func",
content="func",
),
FunctionMessageChunk(
name="func",
content="func",
),
ChatMessage(
role="human",
content="human",
),
ChatMessageChunk(
role="human",
content="human",
),
StringPromptValue(text="hello"),
ChatPromptValueConcrete(messages=[AIMessage(content="foo")]),
ChatPromptValueConcrete(messages=[HumanMessage(content="human")]),
ChatPromptValueConcrete(
messages=[ToolMessage(content="foo", tool_call_id="bar")],
),
ChatPromptValueConcrete(messages=[SystemMessage(content="foo")]),
Document(page_content="hello"),
AgentFinish(return_values={}, log=""),
AgentAction(tool="tool", tool_input="input", log=""),
AgentActionMessageLog(
tool="tool",
tool_input="input",
log="",
message_log=[HumanMessage(content="human")],
),
Generation(
text="hello",
generation_info={"info": "info"},
),
ChatGeneration(
message=HumanMessage(content="human"),
),
ChatGenerationChunk(
message=HumanMessageChunk(content="cat"),
),
]
for lc_object in lc_objects:
d = lc_object.model_dump()
assert "type" in d, f"Missing key `type` for {type(lc_object)}"
obj1 = well_known_lc_object.model_validate(d)
assert type(obj1.root) is type(lc_object), f"failed for {type(lc_object)}"
with pytest.raises((TypeError, ValidationError)):
# Make sure that specifically validation error is raised
well_known_lc_object.model_validate({})
|
"""Test formatting functionality."""
from typing import Union
import pytest
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.documents import Document
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, Generation
from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
from pydantic import RootModel, ValidationError
@pytest.mark.xfail(reason="TODO: FIX BEFORE 0.3 RELEASE")
def test_serialization_of_wellknown_objects() -> None:
"""Test that pydantic is able to serialize and deserialize well known objects."""
well_known_lc_object = RootModel[
Union[
Document,
HumanMessage,
SystemMessage,
ChatMessage,
FunctionMessage,
FunctionMessageChunk,
AIMessage,
HumanMessageChunk,
SystemMessageChunk,
ChatMessageChunk,
AIMessageChunk,
StringPromptValue,
ChatPromptValueConcrete,
AgentFinish,
AgentAction,
AgentActionMessageLog,
ChatGeneration,
Generation,
ChatGenerationChunk,
]
]
lc_objects = [
HumanMessage(content="human"),
HumanMessageChunk(content="human"),
AIMessage(content="ai"),
AIMessageChunk(content="ai"),
SystemMessage(content="sys"),
SystemMessageChunk(content="sys"),
FunctionMessage(
name="func",
content="func",
),
FunctionMessageChunk(
name="func",
content="func",
),
ChatMessage(
role="human",
content="human",
),
ChatMessageChunk(
role="human",
content="human",
),
StringPromptValue(text="hello"),
ChatPromptValueConcrete(messages=[AIMessage(content="foo")]),
ChatPromptValueConcrete(messages=[HumanMessage(content="human")]),
ChatPromptValueConcrete(
messages=[ToolMessage(content="foo", tool_call_id="bar")]
),
ChatPromptValueConcrete(messages=[SystemMessage(content="foo")]),
Document(page_content="hello"),
AgentFinish(return_values={}, log=""),
AgentAction(tool="tool", tool_input="input", log=""),
AgentActionMessageLog(
tool="tool",
tool_input="input",
log="",
message_log=[HumanMessage(content="human")],
),
Generation(
text="hello",
generation_info={"info": "info"},
),
ChatGeneration(
message=HumanMessage(content="human"),
),
ChatGenerationChunk(
message=HumanMessageChunk(content="cat"),
),
]
for lc_object in lc_objects:
d = lc_object.model_dump()
assert "type" in d, f"Missing key `type` for {type(lc_object)}"
obj1 = well_known_lc_object.model_validate(d)
assert type(obj1.root) is type(lc_object), f"failed for {type(lc_object)}"
with pytest.raises((TypeError, ValidationError)):
# Make sure that specifically validation error is raised
well_known_lc_object.model_validate({})
|
SYSTEM_MESSAGE_PREFIX = """Answer the following questions as best you can. You have access to the following tools:""" # noqa: E501
FORMAT_INSTRUCTIONS = """The way you use the tools is by specifying a json blob.
Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here).
The only values that should be in the "action" field are: {tool_names}
The $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:
```
{{{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}}}
```
ALWAYS use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action:
```
$JSON_BLOB
```
Observation: the result of the action
... (this Thought/Action/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question""" # noqa: E501
SYSTEM_MESSAGE_SUFFIX = """Begin! Reminder to always use the exact characters `Final Answer` when responding.""" # noqa: E501
HUMAN_MESSAGE = "{input}\n\n{agent_scratchpad}"
|
# flake8: noqa
SYSTEM_MESSAGE_PREFIX = """Answer the following questions as best you can. You have access to the following tools:"""
FORMAT_INSTRUCTIONS = """The way you use the tools is by specifying a json blob.
Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here).
The only values that should be in the "action" field are: {tool_names}
The $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:
```
{{{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}}}
```
ALWAYS use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action:
```
$JSON_BLOB
```
Observation: the result of the action
... (this Thought/Action/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
SYSTEM_MESSAGE_SUFFIX = """Begin! Reminder to always use the exact characters `Final Answer` when responding."""
HUMAN_MESSAGE = "{input}\n\n{agent_scratchpad}"
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset',
'CocoDataset',
'DeepFashionDataset',
'VOCDataset',
'CityscapesDataset',
'LVISDataset',
'LVISV05Dataset',
'LVISV1Dataset',
'WIDERFaceDataset',
'get_loading_pipeline',
'CocoPanopticDataset',
'MultiImageMixDataset',
'OpenImagesDataset',
'OpenImagesChallengeDataset',
'AspectRatioBatchSampler',
'ClassAwareSampler',
'MultiSourceSampler',
'GroupMultiSourceSampler',
'BaseDetDataset',
'CrowdHumanDataset',
'Objects365V1Dataset',
'Objects365V2Dataset',
'DSDLDetDataset',
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset'
]
|
"""
Tests the correct computation of evaluation scores from LabelAccuracyEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
from pathlib import Path
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
@pytest.mark.skip(reason="This test is rather slow, and the LabelAccuracyEvaluator is not commonly used.")
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer, tmp_path: Path) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model, output_path=str(tmp_path))
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
|
"""
Tests the correct computation of evaluation scores from LabelAccuracyEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
from pathlib import Path
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer, tmp_path: Path) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model, output_path=str(tmp_path))
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
|
"""
Internal helpers
"""
from collections.abc import Callable
from functools import wraps
from inspect import signature
from types import ModuleType
from typing import TypeVar
_T = TypeVar("_T")
def get_xp(xp: ModuleType) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
"""
Decorator to automatically replace xp with the corresponding array module.
Use like
import numpy as np
@get_xp(np)
def func(x, /, xp, kwarg=None):
return xp.func(x, kwarg=kwarg)
Note that xp must be a keyword argument and come after all non-keyword
arguments.
"""
def inner(f: Callable[..., _T], /) -> Callable[..., _T]:
@wraps(f)
def wrapped_f(*args: object, **kwargs: object) -> object:
return f(*args, xp=xp, **kwargs)
sig = signature(f)
new_sig = sig.replace(
parameters=[par for i, par in sig.parameters.items() if i != "xp"]
)
if wrapped_f.__doc__ is None:
wrapped_f.__doc__ = f"""\
Array API compatibility wrapper for {f.__name__}.
See the corresponding documentation in NumPy/CuPy and/or the array API
specification for more details.
"""
wrapped_f.__signature__ = new_sig # pyright: ignore[reportAttributeAccessIssue]
return wrapped_f # pyright: ignore[reportReturnType]
return inner
__all__ = ["get_xp"]
def __dir__() -> list[str]:
return __all__
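# A minimal usage sketch, assuming NumPy is installed; the wrapped function and
# its name are illustrative, not part of this module.
import numpy as np

@get_xp(np)
def clip_to_unit(x, /, xp, lo=0.0, hi=1.0):
    # xp is injected by the decorator, so callers never pass it themselves.
    return xp.clip(x, lo, hi)

print(clip_to_unit(np.array([-0.5, 0.3, 1.7])))  # -> [0.  0.3 1. ]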
|
"""
Internal helpers
"""
from functools import wraps
from inspect import signature
def get_xp(xp):
"""
Decorator to automatically replace xp with the corresponding array module.
Use like
import numpy as np
@get_xp(np)
def func(x, /, xp, kwarg=None):
return xp.func(x, kwarg=kwarg)
Note that xp must be a keyword argument and come after all non-keyword
arguments.
"""
def inner(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
return f(*args, xp=xp, **kwargs)
sig = signature(f)
new_sig = sig.replace(
parameters=[sig.parameters[i] for i in sig.parameters if i != "xp"]
)
if wrapped_f.__doc__ is None:
wrapped_f.__doc__ = f"""\
Array API compatibility wrapper for {f.__name__}.
See the corresponding documentation in NumPy/CuPy and/or the array API
specification for more details.
"""
wrapped_f.__signature__ = new_sig
return wrapped_f
return inner
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from pydantic import BaseModel
from langchain_core.runnables import run_in_executor
if TYPE_CHECKING:
from collections.abc import Sequence
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
class BaseDocumentCompressor(BaseModel, ABC):
"""Base class for document compressors.
This abstraction is primarily used for
post-processing of retrieved documents.
Documents matching a given query are first retrieved.
Then the list of documents can be further processed.
For example, one could re-rank the retrieved documents
using an LLM.
**Note** users should favor using a RunnableLambda
instead of sub-classing from this interface.
"""
@abstractmethod
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context.
Args:
documents: The retrieved documents.
query: The query context.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
"""
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Async compress retrieved documents given the query context.
Args:
documents: The retrieved documents.
query: The query context.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
"""
return await run_in_executor(
None, self.compress_documents, documents, query, callbacks
)
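# A minimal illustrative subclass, assuming only the interface defined above; the
# keyword-matching logic is a made-up example rather than a shipped compressor.
class KeywordFilterCompressor(BaseDocumentCompressor):
    """Keep only documents whose content mentions the query verbatim."""

    def compress_documents(
        self,
        documents: Sequence[Document],
        query: str,
        callbacks: Optional[Callbacks] = None,
    ) -> Sequence[Document]:
        # Drop documents that do not contain the query string (case-insensitive).
        return [doc for doc in documents if query.lower() in doc.page_content.lower()]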
|
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Optional
from pydantic import BaseModel
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.runnables import run_in_executor
class BaseDocumentCompressor(BaseModel, ABC):
"""Base class for document compressors.
This abstraction is primarily used for
post-processing of retrieved documents.
Documents matching a given query are first retrieved.
Then the list of documents can be further processed.
For example, one could re-rank the retrieved documents
using an LLM.
**Note** users should favor using a RunnableLambda
instead of sub-classing from this interface.
"""
@abstractmethod
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context.
Args:
documents: The retrieved documents.
query: The query context.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
"""
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Async compress retrieved documents given the query context.
Args:
documents: The retrieved documents.
query: The query context.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
"""
return await run_in_executor(
None, self.compress_documents, documents, query, callbacks
)
|
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.66666793823242, 'sparsity_ratio': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# decode returns, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.get_sparsity_stats(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats['row_sparsity_mean']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# decode returns, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.get_sparsity_stats(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats_limited['row_sparsity_mean']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 32.0, 'row_sparsity_mean': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
import pytest
from llama_index.core import MockEmbedding
from llama_index.core.chat_engine.context import (
ContextChatEngine,
)
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.llms.mock import MockLLM
from llama_index.core.schema import Document
SYSTEM_PROMPT = "Talk like a pirate."
@pytest.fixture()
def chat_engine() -> ContextChatEngine:
index = VectorStoreIndex.from_documents(
[Document.example()], embed_model=MockEmbedding(embed_dim=3)
)
retriever = index.as_retriever()
return ContextChatEngine.from_defaults(
retriever, llm=MockLLM(), system_prompt=SYSTEM_PROMPT
)
def test_chat(chat_engine: ContextChatEngine):
response = chat_engine.chat("Hello World!")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = chat_engine.chat("What is the capital of the moon?")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
def test_chat_stream(chat_engine: ContextChatEngine):
response = chat_engine.stream_chat("Hello World!")
num_iters = 0
for _ in response.response_gen:
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = chat_engine.stream_chat("What is the capital of the moon?")
num_iters = 0
for _ in response.response_gen:
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio
async def test_achat(chat_engine: ContextChatEngine):
response = await chat_engine.achat("Hello World!")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = await chat_engine.achat("What is the capital of the moon?")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio
async def test_chat_astream(chat_engine: ContextChatEngine):
response = await chat_engine.astream_chat("Hello World!")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = await chat_engine.astream_chat("What is the capital of the moon?")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
|
import pytest
from llama_index.core import MockEmbedding
from llama_index.core.chat_engine.context import (
ContextChatEngine,
)
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.llms.mock import MockLLM
from llama_index.core.schema import Document
SYSTEM_PROMPT = "Talk like a pirate."
@pytest.fixture()
def chat_engine() -> ContextChatEngine:
index = VectorStoreIndex.from_documents(
[Document.example()], embed_model=MockEmbedding(embed_dim=3)
)
retriever = index.as_retriever()
return ContextChatEngine.from_defaults(
retriever, llm=MockLLM(), system_prompt=SYSTEM_PROMPT
)
def test_chat(chat_engine: ContextChatEngine):
response = chat_engine.chat("Hello World!")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = chat_engine.chat("What is the capital of the moon?")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
def test_chat_stream(chat_engine: ContextChatEngine):
response = chat_engine.stream_chat("Hello World!")
num_iters = 0
for _ in response.response_gen:
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = chat_engine.stream_chat("What is the capital of the moon?")
num_iters = 0
for _ in response.response_gen:
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio()
async def test_achat(chat_engine: ContextChatEngine):
response = await chat_engine.achat("Hello World!")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = await chat_engine.achat("What is the capital of the moon?")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio()
async def test_chat_astream(chat_engine: ContextChatEngine):
response = await chat_engine.astream_chat("Hello World!")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = await chat_engine.astream_chat("What is the capital of the moon?")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
|
import pytest
from llama_index.llms.nvidia import NVIDIA as Interface
from llama_index.llms.nvidia.base import BASE_URL
from pytest_httpx import HTTPXMock
UNKNOWN_URLS = [
"https://test_url/v1",
"https://test_url/v1/",
"http://test_url/v1",
"http://test_url/v1/",
]
@pytest.fixture()
def mock_unknown_urls(httpx_mock: HTTPXMock, base_url: str) -> None:
mock_response = {
"data": [
{
"id": "meta/llama3-8b-instruct",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
base_url = base_url.rstrip("/")
httpx_mock.add_response(
url=f"{base_url}/models",
method="GET",
json=mock_response,
status_code=200,
)
def test_mode_switch_nvidia_throws_without_key_deprecated(masked_env_var: str):
x = Interface()
with pytest.raises(ValueError):
with pytest.warns(DeprecationWarning):
x.mode("nvidia")
def test_mode_switch_nvidia_with_key_deprecated(masked_env_var: str):
with pytest.warns(DeprecationWarning):
Interface().mode("nvidia", api_key="test")
def test_mode_switch_nim_throws_without_url_deprecated():
instance = Interface()
with pytest.raises(ValueError):
with pytest.warns(DeprecationWarning):
instance.mode("nim")
def test_mode_switch_nim_with_url_deprecated():
with pytest.warns(DeprecationWarning):
Interface().mode("nim", base_url="test")
@pytest.mark.parametrize("base_url", ["https://test_url/v1/"])
def test_mode_switch_param_setting_deprecated(base_url):
instance = Interface(model="meta/llama3-8b-instruct")
with pytest.warns(DeprecationWarning):
instance1 = instance.mode("nim", base_url=base_url)
assert instance1.model == "meta/llama3-8b-instruct"
assert str(instance1.api_base) == base_url
with pytest.warns(DeprecationWarning):
instance2 = instance1.mode(
"nvidia", api_key="test", model="meta/llama3-15b-instruct"
)
assert instance2.model == "meta/llama3-15b-instruct"
assert str(instance2.api_base) == BASE_URL
assert instance2.api_key == "test"
@pytest.mark.parametrize("base_url", UNKNOWN_URLS)
def test_mode_switch_unknown_base_url_without_key(
mock_unknown_urls, masked_env_var: str, base_url: str
):
Interface(base_url=base_url)
@pytest.mark.parametrize("base_url", UNKNOWN_URLS)
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_mode_switch_unknown_base_url_with_key(
mock_unknown_urls, masked_env_var: str, param: str, base_url: str
):
Interface(base_url=base_url, **{param: "test"})
@pytest.mark.parametrize("base_url", [BASE_URL])
def test_mode_switch_known_base_url_without_key(masked_env_var: str, base_url: str):
with pytest.warns(UserWarning):
cls = Interface(base_url=base_url)
assert cls._is_hosted
@pytest.mark.parametrize("base_url", [BASE_URL])
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_mode_switch_known_base_url_with_key(
masked_env_var: str, base_url: str, param: str
):
Interface(base_url=base_url, **{param: "test"})
|
import pytest
from llama_index.llms.nvidia import NVIDIA as Interface
from llama_index.llms.nvidia.base import BASE_URL, KNOWN_URLS
from pytest_httpx import HTTPXMock
UNKNOWN_URLS = [
"https://test_url/v1",
"https://test_url/v1/",
"http://test_url/v1",
"http://test_url/v1/",
]
@pytest.fixture()
def mock_unknown_urls(httpx_mock: HTTPXMock, base_url: str):
mock_response = {
"data": [
{
"id": "meta/llama3-8b-instruct",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
if base_url.endswith("/"):
base_url = base_url[:-1]
httpx_mock.add_response(
url=f"{base_url}/models",
method="GET",
json=mock_response,
status_code=200,
)
def test_mode_switch_nvidia_throws_without_key_deprecated(masked_env_var: str):
x = Interface()
with pytest.raises(ValueError):
with pytest.warns(DeprecationWarning):
x.mode("nvidia")
def test_mode_switch_nvidia_with_key_deprecated(masked_env_var: str):
with pytest.warns(DeprecationWarning):
Interface().mode("nvidia", api_key="test")
def test_mode_switch_nim_throws_without_url_deprecated():
instance = Interface()
with pytest.raises(ValueError):
with pytest.warns(DeprecationWarning):
instance.mode("nim")
def test_mode_switch_nim_with_url_deprecated():
with pytest.warns(DeprecationWarning):
Interface().mode("nim", base_url="test")
@pytest.mark.parametrize("base_url", ["https://test_url/v1/"])
def test_mode_switch_param_setting_deprecated(base_url):
instance = Interface(model="meta/llama3-8b-instruct")
with pytest.warns(DeprecationWarning):
instance1 = instance.mode("nim", base_url=base_url)
assert instance1.model == "meta/llama3-8b-instruct"
assert str(instance1.api_base) == base_url
with pytest.warns(DeprecationWarning):
instance2 = instance1.mode(
"nvidia", api_key="test", model="meta/llama3-15b-instruct"
)
assert instance2.model == "meta/llama3-15b-instruct"
assert str(instance2.api_base) == BASE_URL
assert instance2.api_key == "test"
@pytest.mark.parametrize("base_url", UNKNOWN_URLS)
def test_mode_switch_unknown_base_url_without_key(
mock_unknown_urls, masked_env_var: str, base_url: str
):
Interface(base_url=base_url)
@pytest.mark.parametrize("base_url", UNKNOWN_URLS)
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_mode_switch_unknown_base_url_with_key(
mock_unknown_urls, masked_env_var: str, param: str, base_url: str
):
Interface(base_url=base_url, **{param: "test"})
@pytest.mark.parametrize("base_url", KNOWN_URLS)
def test_mode_switch_known_base_url_without_key(masked_env_var: str, base_url: str):
with pytest.warns(UserWarning):
Interface(base_url=base_url)
@pytest.mark.parametrize("base_url", KNOWN_URLS)
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_mode_switch_known_base_url_with_key(
masked_env_var: str, base_url: str, param: str
):
Interface(base_url=base_url, **{param: "test"})
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .detr import DETR
@DETECTORS.register_module()
class DeformableDETR(DETR):
def __init__(self, *args, **kwargs):
super(DETR, self).__init__(*args, **kwargs)
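# A small illustration, independent of mmdet, of why the call above is
# `super(DETR, self).__init__(...)`: starting the MRO lookup after DETR skips
# DETR's own __init__ and dispatches straight to its base class.
class A:
    def __init__(self):
        print("A.__init__")

class B(A):
    def __init__(self):
        print("B.__init__")

class C(B):
    def __init__(self):
        super(B, self).__init__()  # skips B.__init__, runs A.__init__

C()  # prints only "A.__init__"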
|
from ..builder import DETECTORS
from .detr import DETR
@DETECTORS.register_module()
class DeformableDETR(DETR):
def __init__(self, *args, **kwargs):
super(DETR, self).__init__(*args, **kwargs)
|
import os
from unittest.mock import patch
import pytest
from langchain_openai import OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = "foo"
def test_openai_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
OpenAIEmbeddings(model_kwargs={"model": "foo"})
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAIEmbeddings(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
def test_embed_documents_with_custom_chunk_size() -> None:
embeddings = OpenAIEmbeddings(chunk_size=2)
texts = ["text1", "text2", "text3", "text4"]
custom_chunk_size = 3
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]},
{"data": [{"embedding": [0.5, 0.6]}, {"embedding": [0.7, 0.8]}]},
]
result = embeddings.embed_documents(texts, chunk_size=custom_chunk_size)
_, tokens, __ = embeddings._tokenize(texts, custom_chunk_size)
mock_create.call_args
mock_create.assert_any_call(input=tokens[0:3], **embeddings._invocation_params)
mock_create.assert_any_call(input=tokens[3:4], **embeddings._invocation_params)
assert result == [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]
def test_embed_documents_with_custom_chunk_size_no_check_ctx_length() -> None:
embeddings = OpenAIEmbeddings(chunk_size=2, check_embedding_ctx_length=False)
texts = ["text1", "text2", "text3", "text4"]
custom_chunk_size = 3
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]},
{"data": [{"embedding": [0.5, 0.6]}, {"embedding": [0.7, 0.8]}]},
]
result = embeddings.embed_documents(texts, chunk_size=custom_chunk_size)
mock_create.call_args
mock_create.assert_any_call(input=texts[0:3], **embeddings._invocation_params)
mock_create.assert_any_call(input=texts[3:4], **embeddings._invocation_params)
assert result == [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]
def test_embed_with_kwargs() -> None:
embeddings = OpenAIEmbeddings(
model="text-embedding-3-small", check_embedding_ctx_length=False
)
texts = ["text1", "text2"]
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2, 0.3]}, {"embedding": [0.4, 0.5, 0.6]}]}
]
result = embeddings.embed_documents(texts, dimensions=3)
mock_create.assert_any_call(
input=texts, dimensions=3, **embeddings._invocation_params
)
assert result == [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
async def test_embed_with_kwargs_async() -> None:
embeddings = OpenAIEmbeddings(
model="text-embedding-3-small",
check_embedding_ctx_length=False,
dimensions=4, # also check that runtime kwargs take precedence
)
texts = ["text1", "text2"]
with patch.object(embeddings.async_client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2, 0.3]}, {"embedding": [0.4, 0.5, 0.6]}]}
]
result = await embeddings.aembed_documents(texts, dimensions=3)
client_kwargs = embeddings._invocation_params.copy()
assert client_kwargs["dimensions"] == 4
client_kwargs["dimensions"] = 3
mock_create.assert_any_call(input=texts, **client_kwargs)
assert result == [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
|
import os
from unittest.mock import patch
import pytest
from langchain_openai import OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = "foo"
def test_openai_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
OpenAIEmbeddings(model_kwargs={"model": "foo"})
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAIEmbeddings(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
def test_embed_documents_with_custom_chunk_size() -> None:
embeddings = OpenAIEmbeddings(chunk_size=2)
texts = ["text1", "text2", "text3", "text4"]
custom_chunk_size = 3
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]},
{"data": [{"embedding": [0.5, 0.6]}, {"embedding": [0.7, 0.8]}]},
]
result = embeddings.embed_documents(texts, chunk_size=custom_chunk_size)
_, tokens, __ = embeddings._tokenize(texts, custom_chunk_size)
mock_create.call_args
mock_create.assert_any_call(input=tokens[0:3], **embeddings._invocation_params)
mock_create.assert_any_call(input=tokens[3:4], **embeddings._invocation_params)
assert result == [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]
def test_embed_documents_with_custom_chunk_size_no_check_ctx_length() -> None:
embeddings = OpenAIEmbeddings(chunk_size=2, check_embedding_ctx_length=False)
texts = ["text1", "text2", "text3", "text4"]
custom_chunk_size = 3
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]},
{"data": [{"embedding": [0.5, 0.6]}, {"embedding": [0.7, 0.8]}]},
]
result = embeddings.embed_documents(texts, chunk_size=custom_chunk_size)
mock_create.call_args
mock_create.assert_any_call(input=texts[0:3], **embeddings._invocation_params)
mock_create.assert_any_call(input=texts[3:4], **embeddings._invocation_params)
assert result == [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]
|
"""Simple Reader that reads transcript of youtube video."""
import re
from typing import Any, List, Optional
from youtube_transcript_api import YouTubeTranscriptApi
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from llama_index.readers.youtube_transcript.utils import YOUTUBE_URL_PATTERNS
class YoutubeTranscriptReader(BasePydanticReader):
"""Youtube Transcript reader."""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "YoutubeTranscriptReader"
def load_data(
self,
ytlinks: List[str],
languages: Optional[List[str]] = ["en"],
**load_kwargs: Any,
) -> List[Document]:
"""
Load data from the input directory.
Args:
ytlinks (List[str]): List of youtube links \
for which transcripts are to be read.
"""
results = []
for link in ytlinks:
video_id = self._extract_video_id(link)
if not video_id:
raise ValueError(
f"Supplied url {link} is not a supported youtube URL."
"Supported formats include:"
" youtube.com/watch?v=\\{video_id\\} "
"(with or without 'www.')\n"
" youtube.com/embed?v=\\{video_id\\} "
"(with or without 'www.')\n"
" youtu.be/{video_id\\} (never includes www subdomain)"
)
transcript_chunks = YouTubeTranscriptApi.get_transcript(
video_id, languages=languages
)
chunk_text = [chunk["text"] for chunk in transcript_chunks]
transcript = "\n".join(chunk_text)
results.append(
Document(
text=transcript, id_=video_id, extra_info={"video_id": video_id}
)
)
return results
@staticmethod
def _extract_video_id(yt_link) -> Optional[str]:
for pattern in YOUTUBE_URL_PATTERNS:
match = re.search(pattern, yt_link)
if match:
return match.group(1)
# return None if no match is found
return None
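# A minimal usage sketch; the URL below is a placeholder, and network access plus
# the youtube-transcript-api package are assumed.
if __name__ == "__main__":
    reader = YoutubeTranscriptReader()
    documents = reader.load_data(
        ytlinks=["https://www.youtube.com/watch?v=dQw4w9WgXcQ"], languages=["en"]
    )
    for doc in documents:
        print(doc.extra_info["video_id"], len(doc.text))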
|
"""Simple Reader that reads transcript of youtube video."""
import re
from typing import Any, List, Optional
from youtube_transcript_api import YouTubeTranscriptApi
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from llama_index.readers.youtube_transcript.utils import YOUTUBE_URL_PATTERNS
class YoutubeTranscriptReader(BasePydanticReader):
"""Youtube Transcript reader."""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "YoutubeTranscriptReader"
def load_data(
self,
ytlinks: List[str],
languages: Optional[List[str]] = ["en"],
**load_kwargs: Any,
) -> List[Document]:
"""Load data from the input directory.
Args:
ytlinks (List[str]): List of youtube links \
for which transcripts are to be read.
"""
results = []
for link in ytlinks:
video_id = self._extract_video_id(link)
if not video_id:
raise ValueError(
f"Supplied url {link} is not a supported youtube URL."
"Supported formats include:"
" youtube.com/watch?v=\\{video_id\\} "
"(with or without 'www.')\n"
" youtube.com/embed?v=\\{video_id\\} "
"(with or without 'www.')\n"
" youtu.be/{video_id\\} (never includes www subdomain)"
)
transcript_chunks = YouTubeTranscriptApi.get_transcript(
video_id, languages=languages
)
chunk_text = [chunk["text"] for chunk in transcript_chunks]
transcript = "\n".join(chunk_text)
results.append(
Document(
text=transcript, id_=video_id, extra_info={"video_id": video_id}
)
)
return results
@staticmethod
def _extract_video_id(yt_link) -> Optional[str]:
for pattern in YOUTUBE_URL_PATTERNS:
match = re.search(pattern, yt_link)
if match:
return match.group(1)
# return None if no match is found
return None
|
"""Test the criteria eval chain."""
import pytest
from langchain.evaluation.criteria.eval_chain import (
_SUPPORTED_CRITERIA,
Criteria,
CriteriaEvalChain,
CriteriaResultOutputParser,
LabeledCriteriaEvalChain,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_resolve_criteria_str() -> None:
assert CriteriaEvalChain.resolve_criteria("helpfulness") == {
"helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS],
}
assert CriteriaEvalChain.resolve_criteria("correctness") == {
"correctness": _SUPPORTED_CRITERIA[Criteria.CORRECTNESS],
}
@pytest.mark.parametrize(
"text,want",
[
("Y", {"reasoning": "", "value": "Y", "score": 1}),
(
"""Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.
Y""",
{
"reasoning": """Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.""", # noqa: E501
"value": "Y",
"score": 1,
},
),
(
" NThe submission N is correct, accurate, and factual. It accurately"
" identifies the specific effects of knowledge and interest on"
" these factors. Therefore, the submission Y meets the criteria. Y",
{
"reasoning": "NThe submission N is correct, accurate, and factual. It"
" accurately identifies the specific effects of knowledge and interest"
" on these factors. Therefore, the submission Y meets the criteria.",
"value": "Y",
"score": 1,
},
),
],
)
def test_CriteriaResultOutputParser_parse(text: str, want: dict) -> None:
output_parser = CriteriaResultOutputParser()
got = output_parser.parse(text)
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
@pytest.mark.parametrize("criterion", list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) -> None:
assert CriteriaEvalChain.resolve_criteria(criterion) == {
criterion.value: _SUPPORTED_CRITERIA[criterion],
}
def test_criteria_eval_chain() -> None:
chain = CriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"},
sequential_responses=True,
),
criteria={"my criterion": "my criterion description"},
)
with pytest.warns(UserWarning, match=chain._skip_reference_warning):
result = chain.evaluate_strings(
prediction="my prediction",
reference="my reference",
input="my input",
)
assert result["reasoning"] == "The meaning of life"
def test_criteria_eval_chain_missing_reference() -> None:
chain = LabeledCriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"},
sequential_responses=True,
),
criteria={"my criterion": "my criterion description"},
)
with pytest.raises(ValueError):
chain.evaluate_strings(prediction="my prediction", input="my input")
def test_implements_string_protocol() -> None:
assert issubclass(CriteriaEvalChain, StringEvaluator)
|
"""Test the criteria eval chain."""
import pytest
from langchain.evaluation.criteria.eval_chain import (
_SUPPORTED_CRITERIA,
Criteria,
CriteriaEvalChain,
CriteriaResultOutputParser,
LabeledCriteriaEvalChain,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_resolve_criteria_str() -> None:
assert CriteriaEvalChain.resolve_criteria("helpfulness") == {
"helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]
}
assert CriteriaEvalChain.resolve_criteria("correctness") == {
"correctness": _SUPPORTED_CRITERIA[Criteria.CORRECTNESS]
}
@pytest.mark.parametrize(
"text,want",
[
("Y", {"reasoning": "", "value": "Y", "score": 1}),
(
"""Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.
Y""",
{
"reasoning": """Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.""", # noqa: E501
"value": "Y",
"score": 1,
},
),
(
" NThe submission N is correct, accurate, and factual. It accurately"
" identifies the specific effects of knowledge and interest on"
" these factors. Therefore, the submission Y meets the criteria. Y",
{
"reasoning": "NThe submission N is correct, accurate, and factual. It"
" accurately identifies the specific effects of knowledge and interest"
" on these factors. Therefore, the submission Y meets the criteria.",
"value": "Y",
"score": 1,
},
),
],
)
def test_CriteriaResultOutputParser_parse(text: str, want: dict) -> None:
output_parser = CriteriaResultOutputParser()
got = output_parser.parse(text)
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
@pytest.mark.parametrize("criterion", list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) -> None:
assert CriteriaEvalChain.resolve_criteria(criterion) == {
criterion.value: _SUPPORTED_CRITERIA[criterion]
}
def test_criteria_eval_chain() -> None:
chain = CriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"}, sequential_responses=True
),
criteria={"my criterion": "my criterion description"},
)
with pytest.warns(UserWarning, match=chain._skip_reference_warning):
result = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert result["reasoning"] == "The meaning of life"
def test_criteria_eval_chain_missing_reference() -> None:
chain = LabeledCriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"},
sequential_responses=True,
),
criteria={"my criterion": "my criterion description"},
)
with pytest.raises(ValueError):
chain.evaluate_strings(prediction="my prediction", input="my input")
def test_implements_string_protocol() -> None:
assert issubclass(CriteriaEvalChain, StringEvaluator)
|
from typing import TYPE_CHECKING, NamedTuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T', bound='Mesh3DUrl')
class Mesh3DLoadResult(NamedTuple):
vertices: NdArray
faces: NdArray
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(mesh_url=str(self))
def load(self: T) -> Mesh3DLoadResult:
"""
Load the data from the url into a named tuple of two NdArrays containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import Mesh3DUrl
class MyDoc(BaseDocument):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
vertices, faces = doc.mesh_url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
:return: named tuple of two NdArrays representing the mesh's vertices and faces
"""
mesh = self._load_trimesh_instance(force='mesh')
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return Mesh3DLoadResult(vertices=vertices, faces=faces)
|
from typing import TYPE_CHECKING, Tuple, TypeVar
import numpy as np
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T', bound='Mesh3DUrl')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(mesh_url=str(self))
def load(self: T) -> Tuple[np.ndarray, np.ndarray]:
"""
Load the data from the url into a tuple of two numpy.ndarrays containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import Mesh3DUrl
class MyDoc(BaseDocument):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
vertices, faces = doc.mesh_url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
:return: tuple of two np.ndarrays representing the mesh's vertices and faces
"""
mesh = self._load_trimesh_instance(force='mesh')
vertices = mesh.vertices.view(np.ndarray)
faces = mesh.faces.view(np.ndarray)
return vertices, faces
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.4'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
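# Worked examples of the parser above; these simply restate its behavior.
assert parse_version_info('0.8.4') == (0, 8, 4)
assert parse_version_info('0.8.0rc1') == (0, 8, 0, 'rc1')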
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.3'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Callable, Optional, cast
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.llm import LLMChain
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast(str, output), metadata=doc.metadata),
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata),
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type]
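# A minimal usage sketch; FakeListLLM (from langchain_core's fake language models)
# and the document text are stand-ins, not part of this module.
from langchain_core.language_models.fake import FakeListLLM

docs = [Document(page_content="LangChain helps build LLM applications.")]
fake_llm = FakeListLLM(responses=["LangChain helps build LLM applications."])
extractor = LLMChainExtractor.from_llm(fake_llm)
compressed = extractor.compress_documents(docs, query="What is LangChain for?")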
|
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Callable, Optional, cast
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.llm import LLMChain
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast(str, output), metadata=doc.metadata)
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata)
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type]
|
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyEmbedding, NdArrayEmbedding, TorchEmbedding
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import tnp
from docarray.typing.tensor.embedding import TensorFlowEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), TorchEmbedding, torch.Tensor),
(np.zeros((1000, 2)), NdArrayEmbedding, np.ndarray),
],
)
def test_torch_ndarray_to_any_embedding(tensor, cls_audio_tensor, cls_tensor):
class MyEmbeddingDoc(BaseDoc):
tensor: AnyEmbedding
doc = MyEmbeddingDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_tensorflow_to_any_embedding():
class MyEmbeddingDoc(BaseDoc):
tensor: AnyEmbedding
doc = MyEmbeddingDoc(tensor=tf.zeros((1000, 2)))
assert isinstance(doc.tensor, TensorFlowEmbedding)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1000, 2)))
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
|