_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
lang_model_name = 'bert-base-uncased'
model = dict(
type='GroundingDINO',
num_queries=900,
with_box_refine=True,
as_two_stage=True,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=False,
),
language_model=dict(
type='BertModel',
name=lang_model_name,
pad_to_max=False,
use_sub_sentence_represent=True,
special_tokens_list=['[CLS]', '[SEP]', '.', '?'],
add_pooling_layer=True,
),
backbone=dict(
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False,
convert_weights=False),
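# Note (illustrative): with embed_dims=96 and out_indices=(1, 2, 3), the Swin-T
# stages emit 192, 384 and 768 channels, which is what the neck's in_channels
# below expects.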
neck=dict(
type='ChannelMapper',
in_channels=[192, 384, 768],
kernel_size=1,
out_channels=256,
act_cfg=None,
bias=True,
norm_cfg=dict(type='GN', num_groups=32),
num_outs=4),
encoder=dict(
num_layers=6,
# visual layer config
layer_cfg=dict(
self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
# text layer config
text_layer_cfg=dict(
self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)),
# fusion layer config
fusion_layer_cfg=dict(
v_dim=256,
l_dim=256,
embed_dim=1024,
num_heads=4,
init_values=1e-4),
),
decoder=dict(
num_layers=6,
return_intermediate=True,
layer_cfg=dict(
# query self attention layer
self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to text
cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to image
cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
post_norm_cfg=None),
positional_encoding=dict(
num_feats=128, normalize=True, offset=0.0, temperature=20),
bbox_head=dict(
type='GroundingDINOHead',
num_classes=80,
sync_cls_avg_factor=True,
contrastive_cfg=dict(max_text_len=256),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0), # 2.0 in DeformDETR
loss_bbox=dict(type='L1Loss', loss_weight=5.0)),
dn_cfg=dict( # TODO: Move to model.train_cfg ?
label_noise_scale=0.5,
box_noise_scale=1.0, # 0.4 for DN-DETR
group_cfg=dict(dynamic=True, num_groups=None,
num_dn_queries=100)), # TODO: half num_dn_queries
# training and testing settings
train_cfg=None,
test_cfg=dict(max_per_img=300))
test_pipeline = [
dict(
type='LoadImageFromFile', backend_args=None,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities',
'tokens_positive'))
]
val_dataloader = dict(
dataset=dict(pipeline=test_pipeline, return_classes=True))
test_dataloader = val_dataloader
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
lang_model_name = 'bert-base-uncased'
model = dict(
type='GroundingDINO',
num_queries=900,
with_box_refine=True,
as_two_stage=True,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=False,
),
language_model=dict(
type='BertModel',
name=lang_model_name,
pad_to_max=False,
use_sub_sentence_represent=True,
special_tokens_list=['[CLS]', '[SEP]', '.', '?'],
add_pooling_layer=True,
),
backbone=dict(
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False,
convert_weights=False),
neck=dict(
type='ChannelMapper',
in_channels=[192, 384, 768],
kernel_size=1,
out_channels=256,
act_cfg=None,
bias=True,
norm_cfg=dict(type='GN', num_groups=32),
num_outs=4),
encoder=dict(
num_layers=6,
# visual layer config
layer_cfg=dict(
self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
# text layer config
text_layer_cfg=dict(
self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)),
# fusion layer config
fusion_layer_cfg=dict(
v_dim=256,
l_dim=256,
embed_dim=1024,
num_heads=4,
init_values=1e-4),
),
decoder=dict(
num_layers=6,
return_intermediate=True,
layer_cfg=dict(
# query self attention layer
self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to text
cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to image
cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
post_norm_cfg=None),
positional_encoding=dict(
num_feats=128, normalize=True, offset=0.0, temperature=20),
bbox_head=dict(
type='GroundingDINOHead',
num_classes=80,
sync_cls_avg_factor=True,
contrastive_cfg=dict(max_text_len=256),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0), # 2.0 in DeformDETR
loss_bbox=dict(type='L1Loss', loss_weight=5.0)),
dn_cfg=dict( # TODO: Move to model.train_cfg ?
label_noise_scale=0.5,
box_noise_scale=1.0, # 0.4 for DN-DETR
group_cfg=dict(dynamic=True, num_groups=None,
num_dn_queries=100)), # TODO: half num_dn_queries
# training and testing settings
train_cfg=None,
test_cfg=dict(max_per_img=300))
test_pipeline = [
dict(
type='LoadImageFromFile', backend_args=None,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities'))
]
val_dataloader = dict(
dataset=dict(pipeline=test_pipeline, return_classes=True))
test_dataloader = val_dataloader
|
_base_ = [
'../_base_/models/fast-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='ProposalBroadcaster',
transforms=[
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
]),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='ProposalBroadcaster',
transforms=[
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
]),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
proposal_file='proposals/rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = [
'../_base_/models/fast-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from huggingface_hub.constants import HF_HOME
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json"
SAFETENSORS_FILE_EXTENSION = "safetensors"
GGUF_FILE_EXTENSION = "gguf"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current versions of `peft` and `transformers` are compatible with
# the PEFT backend. The PEFT backend is used automatically when compatible versions of both
# libraries are available.
# For PEFT the version has to be greater than or equal to 0.6.0 and for transformers it has to be
# greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) >= version.parse(MIN_TRANSFORMERS_VERSION)
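# Note (illustrative): the nested `version.parse(...).base_version` comparison drops
# dev/rc/local suffixes, e.g. version.parse("0.7.1.dev0").base_version == "0.7.1",
# so development builds of an otherwise compatible release still pass the check.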
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if USE_PEFT_BACKEND and _CHECK_PEFT:
dep_version_check("peft")
DECODE_ENDPOINT_SD_V1 = "https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_SD_XL = "https://x2dmsqunjd6k9prw.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_FLUX = "https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/"
DECODE_ENDPOINT_HUNYUAN_VIDEO = "https://o7ywnmrahorts457.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_SD_V1 = "https://qc6479g0aac6qwy9.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_SD_XL = "https://xjqqhmyn62rog84g.us-east-1.aws.endpoints.huggingface.cloud/"
ENCODE_ENDPOINT_FLUX = "https://ptccx55jz97f9zgo.us-east-1.aws.endpoints.huggingface.cloud/"
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from huggingface_hub.constants import HF_HOME
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json"
SAFETENSORS_FILE_EXTENSION = "safetensors"
GGUF_FILE_EXTENSION = "gguf"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current versions of `peft` and `transformers` are compatible with
# the PEFT backend. The PEFT backend is used automatically when compatible versions of both
# libraries are available.
# For PEFT the version has to be greater than or equal to 0.6.0 and for transformers it has to be
# greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) >= version.parse(MIN_TRANSFORMERS_VERSION)
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if USE_PEFT_BACKEND and _CHECK_PEFT:
dep_version_check("peft")
|
from langchain_core.tools import (
BaseTool,
SchemaAnnotationError,
StructuredTool,
Tool,
ToolException,
create_schema_from_function,
tool,
)
__all__ = [
"BaseTool",
"SchemaAnnotationError",
"StructuredTool",
"Tool",
"ToolException",
"create_schema_from_function",
"tool",
]
|
from langchain_core.tools import (
BaseTool,
SchemaAnnotationError,
StructuredTool,
Tool,
ToolException,
create_schema_from_function,
tool,
)
__all__ = [
"SchemaAnnotationError",
"create_schema_from_function",
"ToolException",
"BaseTool",
"Tool",
"StructuredTool",
"tool",
]
|
_base_ = ['co_dino_5scale_r50_8xb2_1x_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_swin_large_16e_o365tococo-614254c9.pth' # noqa
# model settings
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=12,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
# Please only add indices that are actually used
# in the FPN, otherwise some parameters will not be used
with_cp=True,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
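# Note (illustrative): with embed_dims=192 and out_indices=(0, 1, 2, 3), the Swin-L
# stages emit 192, 384, 768 and 1536 channels, matching the neck's in_channels below.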
neck=dict(in_channels=[192, 384, 768, 1536]),
query_head=dict(
dn_cfg=dict(box_noise_scale=0.4, group_cfg=dict(num_dn_queries=500)),
transformer=dict(encoder=dict(with_cp=6))))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048),
(608, 2048), (640, 2048), (672, 2048), (704, 2048),
(736, 2048), (768, 2048), (800, 2048), (832, 2048),
(864, 2048), (896, 2048), (928, 2048), (960, 2048),
(992, 2048), (1024, 2048), (1056, 2048),
(1088, 2048), (1120, 2048), (1152, 2048),
(1184, 2048), (1216, 2048), (1248, 2048),
(1280, 2048), (1312, 2048), (1344, 2048),
(1376, 2048), (1408, 2048), (1440, 2048),
(1472, 2048), (1504, 2048), (1536, 2048)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
# The ratio of all images in the train dataset is < 7;
# this follows the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048),
(608, 2048), (640, 2048), (672, 2048), (704, 2048),
(736, 2048), (768, 2048), (800, 2048), (832, 2048),
(864, 2048), (896, 2048), (928, 2048), (960, 2048),
(992, 2048), (1024, 2048), (1056, 2048),
(1088, 2048), (1120, 2048), (1152, 2048),
(1184, 2048), (1216, 2048), (1248, 2048),
(1280, 2048), (1312, 2048), (1344, 2048),
(1376, 2048), (1408, 2048), (1440, 2048),
(1472, 2048), (1504, 2048), (1536, 2048)],
keep_ratio=True)
]
]),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=1, num_workers=1, dataset=dict(pipeline=train_pipeline))
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1280), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
optim_wrapper = dict(optimizer=dict(lr=1e-4))
max_epochs = 16
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8],
gamma=0.1)
]
|
_base_ = ['co_dino_5scale_r50_8xb2_1x_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/codetr/co_dino_5scale_swin_large_22e_o365-0a33e247.pth' # noqa
# model settings
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=12,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
# Please only add indices that are actually used
# in the FPN, otherwise some parameters will not be used
with_cp=True,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768, 1536]),
query_head=dict(
dn_cfg=dict(box_noise_scale=0.4, group_cfg=dict(num_dn_queries=500)),
transformer=dict(encoder=dict(with_cp=6))))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048),
(608, 2048), (640, 2048), (672, 2048), (704, 2048),
(736, 2048), (768, 2048), (800, 2048), (832, 2048),
(864, 2048), (896, 2048), (928, 2048), (960, 2048),
(992, 2048), (1024, 2048), (1056, 2048),
(1088, 2048), (1120, 2048), (1152, 2048),
(1184, 2048), (1216, 2048), (1248, 2048),
(1280, 2048), (1312, 2048), (1344, 2048),
(1376, 2048), (1408, 2048), (1440, 2048),
(1472, 2048), (1504, 2048), (1536, 2048)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
# The ratio of all images in the train dataset is < 7;
# this follows the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 2048), (512, 2048), (544, 2048), (576, 2048),
(608, 2048), (640, 2048), (672, 2048), (704, 2048),
(736, 2048), (768, 2048), (800, 2048), (832, 2048),
(864, 2048), (896, 2048), (928, 2048), (960, 2048),
(992, 2048), (1024, 2048), (1056, 2048),
(1088, 2048), (1120, 2048), (1152, 2048),
(1184, 2048), (1216, 2048), (1248, 2048),
(1280, 2048), (1312, 2048), (1344, 2048),
(1376, 2048), (1408, 2048), (1440, 2048),
(1472, 2048), (1504, 2048), (1536, 2048)],
keep_ratio=True)
]
]),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=1, num_workers=1, dataset=dict(pipeline=train_pipeline))
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1280), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
optim_wrapper = dict(optimizer=dict(lr=1e-4))
max_epochs = 16
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8],
gamma=0.1)
]
|
"""
Initializer script that installs packages with pip.
"""
from __future__ import annotations
import argparse
import logging
import os
import shutil
import subprocess
import sys
import time
def run_command(args: list[str]) -> subprocess.CompletedProcess[bytes]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(args, check=True)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pip initializer")
parser.add_argument(
"packages",
nargs="+",
help="pip packages to install",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"--dry-run", help="do not install anything, just print what would be done."
)
parser.add_argument(
"--no-black-binary",
help="do not use pre-compiled binaries from pip for black.",
action="store_true",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
uv_available = (
any(prefix in sys.base_prefix for prefix in ["uv/python", "uv\\python"])
and shutil.which("uv") is not None
)
if uv_available:
pip_args = ["uv", "pip", "install"]
elif sys.executable:
pip_args = [sys.executable, "-mpip", "install"]
else:
pip_args = ["pip3", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
#
# However, `pip install --user` interacts poorly with virtualenvs (see:
# https://bit.ly/3vD4kvl) and conda (see: https://bit.ly/3KG7ZfU). So in
# these cases perform a regular installation.
in_conda = os.environ.get("CONDA_PREFIX") is not None
in_virtualenv = os.environ.get("VIRTUAL_ENV") is not None
if not in_conda and not in_virtualenv:
pip_args.append("--user")
pip_args.extend(args.packages)
for package in args.packages:
package_name, _, version = package.partition("=")
if version == "":
raise RuntimeError(
"Package {package_name} did not have a version specified. "
"Please specify a version to produce a consistent linting experience."
)
if args.no_black_binary and "black" in package_name:
pip_args.append(f"--no-binary={package_name}")
dry_run = args.dry_run == "1"
if dry_run:
print(f"Would have run: {pip_args}")
sys.exit(0)
run_command(pip_args)
|
"""
Initializer script that installs packages with pip.
"""
from __future__ import annotations
import argparse
import logging
import os
import shutil
import subprocess
import sys
import time
def run_command(
args: list[str],
env: dict[str, str] | None = None,
) -> subprocess.CompletedProcess[str]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(args, env=env, text=True, encoding="utf-8", check=True)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def main() -> None:
parser = argparse.ArgumentParser(description="pip initializer")
parser.add_argument(
"packages",
nargs="+",
help="pip packages to install",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"--dry-run", help="do not install anything, just print what would be done."
)
parser.add_argument(
"--no-black-binary",
help="do not use pre-compiled binaries from pip for black.",
action="store_true",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
env: dict[str, str] = {
**os.environ,
"UV_PYTHON": sys.executable,
"UV_PYTHON_DOWNLOADS": "never",
"FORCE_COLOR": "1",
"CLICOLOR_FORCE": "1",
}
uv_index_url = env.get("UV_INDEX_URL", env.get("PIP_EXTRA_INDEX_URL"))
if uv_index_url:
env["UV_INDEX_URL"] = uv_index_url
uv: str | None = shutil.which("uv")
if uv:
pip_args = [uv, "pip", "install"]
elif sys.executable:
pip_args = [sys.executable, "-mpip", "install"]
else:
pip_args = ["pip3", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
#
# However, `pip install --user` interacts poorly with virtualenvs (see:
# https://bit.ly/3vD4kvl) and conda (see: https://bit.ly/3KG7ZfU). So in
# these cases perform a regular installation.
in_conda = os.environ.get("CONDA_PREFIX") is not None
in_virtualenv = os.environ.get("VIRTUAL_ENV") is not None
if not in_conda and not in_virtualenv:
pip_args.append("--user")
pip_args.extend(args.packages)
for package in args.packages:
package_name, _, version = package.partition("=")
if version == "":
raise RuntimeError(
"Package {package_name} did not have a version specified. "
"Please specify a version to produce a consistent linting experience."
)
if args.no_black_binary and "black" in package_name:
pip_args.append(f"--no-binary={package_name}")
dry_run = args.dry_run == "1"
if dry_run:
print(f"Would have run: {pip_args}")
sys.exit(0)
run_command(pip_args, env=env)
if __name__ == "__main__":
main()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.host = self.runtime_args.host
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the path prefix
# (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: use `backend_args` (named `file_client_args` in versions before 3.0.0rc6)
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
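# For example, with image_size = (1024, 1024) the RandomResize step below samples
# a target scale roughly between (819, 819) and (1280, 1280) before the
# fixed-size RandomCrop.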
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# The model is trained for 270k iterations with a total batch size of 64,
# which is roughly equivalent to 144 epochs.
max_iters = 270000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[243000, 256500, 263250],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# The model is trained for 270k iterations with a total batch size of 64,
# which is roughly equivalent to 144 epochs.
max_iters = 270000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[243000, 256500, 263250],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.6.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
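# Examples (illustrative):
#   parse_version_info('0.6.0')    -> (0, 6, 0)
#   parse_version_info('0.6.0rc1') -> (0, 6, 0, 'rc1')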
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.5.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from typing import Optional
import torch
from ..utils import logging
logger = logging.get_logger(__name__)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
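# Example (illustrative): a key/value tensor of shape (2, 4, 16, 64) with n_rep=2
# is expanded by repeat_kv to shape (2, 8, 16, 64), i.e. 8 attention heads.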
def sdpa_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
is_causal: Optional[bool] = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False) or kwargs.get("head_mask", None) is not None:
logger.warning_once(
"`sdpa` attention does not support `output_attentions=True` or `head_mask`."
" Please set your attention to `eager` if you want any of these features."
)
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
if attention_mask is not None and attention_mask.ndim == 4:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
# SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
# Reference: https://github.com/pytorch/pytorch/issues/112577.
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
# Note that it is important to check first for the shape, otherwise compile will fail with `argument 'is_causal' must be bool, not SymBool`
if is_causal is None:
# The last condition is for encoder (decoder) models which specify this by passing their own `is_causal` flag
# This is mainly due to those models having mixed implementations for encoder, decoder, and encoder-decoder attns
is_causal = query.shape[2] > 1 and attention_mask is None and getattr(module, "is_causal", True)
# Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
# We convert it to a bool for the SDPA kernel that only accepts bools.
if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
is_causal = is_causal.item()
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=attention_mask,
dropout_p=dropout,
scale=scaling,
is_causal=is_causal,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
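# Usage sketch (illustrative, not part of the library API): with query/key/value
# tensors of shape (batch, num_heads, seq_len, head_dim), e.g. (2, 8, 16, 64),
#   out, _ = sdpa_attention_forward(module, q, k, v, attention_mask=None)
# returns `out` of shape (batch, seq_len, num_heads, head_dim) == (2, 16, 8, 64)
# after the final transpose, and `None` in place of attention weights.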
|
from typing import Optional, Tuple
import torch
from ..utils import logging
logger = logging.get_logger(__name__)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def sdpa_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
is_causal: Optional[bool] = None,
**kwargs,
) -> Tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False) or kwargs.get("head_mask", None) is not None:
logger.warning_once(
"`sdpa` attention does not support `output_attentions=True` or `head_mask`."
" Please set your attention to `eager` if you want any of these features."
)
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
if attention_mask is not None and attention_mask.ndim == 4:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
# SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
# Reference: https://github.com/pytorch/pytorch/issues/112577.
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
# Note that it is important to check first for the shape, otherwise compile will fail with `argument 'is_causal' must be bool, not SymBool`
if is_causal is None:
# The last condition is for encoder (decoder) models which specify this by passing their own `is_causal` flag
# This is mainly due to those models having mixed implementations for encoder, decoder, and encoder-decoder attns
is_causal = query.shape[2] > 1 and attention_mask is None and getattr(module, "is_causal", True)
# Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
# We convert it to a bool for the SDPA kernel that only accepts bools.
if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
is_causal = is_causal.item()
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=attention_mask,
dropout_p=dropout,
scale=scaling,
is_causal=is_causal,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
|
"""Tracker for XGBoost collective."""
import ctypes
import json
import socket
from enum import IntEnum, unique
from typing import Dict, Optional, Union
from .core import _LIB, _check_call, _deprecate_positional_args, make_jcargs
def get_family(addr: str) -> int:
"""Get network family from address."""
return socket.getaddrinfo(addr, None)[0][0]
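# Example (illustrative): get_family("127.0.0.1") == socket.AF_INET, while an
# IPv6 address such as "::1" resolves to socket.AF_INET6.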
class RabitTracker:
"""Tracker for the collective used in XGBoost, acting as a coordinator between
workers.
Parameters
----------
n_workers:
The total number of workers in the communication group.
host_ip:
The IP address of the tracker node. XGBoost can try to guess one by probing with
sockets. But it's best to explicitly pass an address.
port:
The port this tracker should listen to. XGBoost can query an available port from
the OS; this configuration is useful for restricted network environments.
sortby:
How to sort the workers for rank assignment. The default is host, but users can
set the `DMLC_TASK_ID` via arguments of :py:meth:`~xgboost.collective.init` and
obtain deterministic rank assignment through sorting by task name. Available
options are:
- host
- task
timeout :
Timeout for constructing (bootstrapping) and shutting down the communication
group; this doesn't apply to communication once the group is up and running.
The timeout value should take the time of data loading and pre-processing into
account, due to potential lazy execution. By default the tracker doesn't have
any timeout, to avoid premature aborting.
The :py:meth:`.wait_for` method has a different timeout parameter that can stop
the tracker even if the tracker is still being used. A value error is raised
when timeout is reached.
Examples
--------
.. code-block:: python
from xgboost.tracker import RabitTracker
from xgboost import collective as coll
tracker = RabitTracker(host_ip="127.0.0.1", n_workers=2)
tracker.start()
with coll.CommunicatorContext(**tracker.worker_args()):
ret = coll.broadcast("msg", 0)
assert str(ret) == "msg"
"""
@unique
class _SortBy(IntEnum):
HOST = 0
TASK = 1
@_deprecate_positional_args
def __init__( # pylint: disable=too-many-arguments
self,
n_workers: int,
host_ip: Optional[str],
port: int = 0,
*,
sortby: str = "host",
timeout: int = 0,
) -> None:
handle = ctypes.c_void_p()
if sortby not in ("host", "task"):
raise ValueError("Expecting either 'host' or 'task' for sortby.")
if host_ip is not None:
get_family(host_ip) # use python socket to stop early for invalid address
args = make_jcargs(
host=host_ip,
n_workers=n_workers,
port=port,
dmlc_communicator="rabit",
sortby=self._SortBy.HOST if sortby == "host" else self._SortBy.TASK,
timeout=int(timeout),
)
_check_call(_LIB.XGTrackerCreate(args, ctypes.byref(handle)))
self.handle = handle
def free(self) -> None:
"""Internal function for testing."""
if hasattr(self, "handle"):
handle = self.handle
del self.handle
_check_call(_LIB.XGTrackerFree(handle))
def __del__(self) -> None:
self.free()
def start(self) -> None:
"""Start the tracker. Once started, the client still need to call the
:py:meth:`wait_for` method in order to wait for it to finish (think of it as a
thread).
"""
_check_call(_LIB.XGTrackerRun(self.handle, make_jcargs()))
def wait_for(self, timeout: Optional[int] = None) -> None:
"""Wait for the tracker to finish all the work and shutdown. When timeout is
reached, a value error is raised. By default we don't have timeout since we
don't know how long it takes for the model to finish training.
"""
_check_call(_LIB.XGTrackerWaitFor(self.handle, make_jcargs(timeout=timeout)))
def worker_args(self) -> Dict[str, Union[str, int]]:
"""Get arguments for workers."""
c_env = ctypes.c_char_p()
_check_call(_LIB.XGTrackerWorkerArgs(self.handle, ctypes.byref(c_env)))
assert c_env.value is not None
env = json.loads(c_env.value)
return env
|
"""Tracker for XGBoost collective."""
import ctypes
import json
import socket
from enum import IntEnum, unique
from typing import Dict, Optional, Union
from .core import _LIB, _check_call, _deprecate_positional_args, make_jcargs
def get_family(addr: str) -> int:
"""Get network family from address."""
return socket.getaddrinfo(addr, None)[0][0]
class RabitTracker:
"""Tracker for the collective used in XGBoost, acting as a coordinator between
workers.
Parameters
..........
sortby:
How to sort the workers for rank assignment. The default is host, but users can
set the `DMLC_TASK_ID` via RABIT initialization arguments and obtain
deterministic rank assignment. Available options are:
- host
- task
timeout :
Timeout for constructing the communication group and for waiting for the tracker
to shut down when instructed; this doesn't apply to communication while tracking
is running.
The timeout value should take the time of data loading and pre-processing into
account, due to potential lazy execution.
The :py:meth:`.wait_for` method has a different timeout parameter that can stop
the tracker even if the tracker is still being used. A value error is raised
when timeout is reached.
"""
@unique
class _SortBy(IntEnum):
HOST = 0
TASK = 1
@_deprecate_positional_args
def __init__( # pylint: disable=too-many-arguments
self,
n_workers: int,
host_ip: Optional[str],
port: int = 0,
*,
sortby: str = "host",
timeout: int = 0,
) -> None:
handle = ctypes.c_void_p()
if sortby not in ("host", "task"):
raise ValueError("Expecting either 'host' or 'task' for sortby.")
if host_ip is not None:
get_family(host_ip) # use python socket to stop early for invalid address
args = make_jcargs(
host=host_ip,
n_workers=n_workers,
port=port,
dmlc_communicator="rabit",
sortby=self._SortBy.HOST if sortby == "host" else self._SortBy.TASK,
timeout=int(timeout),
)
_check_call(_LIB.XGTrackerCreate(args, ctypes.byref(handle)))
self.handle = handle
def free(self) -> None:
"""Internal function for testing."""
if hasattr(self, "handle"):
handle = self.handle
del self.handle
_check_call(_LIB.XGTrackerFree(handle))
def __del__(self) -> None:
self.free()
def start(self) -> None:
"""Start the tracker. Once started, the client still need to call the
:py:meth:`wait_for` method in order to wait for it to finish (think of it as a
thread).
"""
_check_call(_LIB.XGTrackerRun(self.handle, make_jcargs()))
def wait_for(self, timeout: Optional[int] = None) -> None:
"""Wait for the tracker to finish all the work and shutdown. When timeout is
reached, a value error is raised. By default we don't have timeout since we
don't know how long it takes for the model to finish training.
"""
_check_call(_LIB.XGTrackerWaitFor(self.handle, make_jcargs(timeout=timeout)))
def worker_args(self) -> Dict[str, Union[str, int]]:
"""Get arguments for workers."""
c_env = ctypes.c_char_p()
_check_call(_LIB.XGTrackerWorkerArgs(self.handle, ctypes.byref(c_env)))
assert c_env.value is not None
env = json.loads(c_env.value)
return env
|
from typing import Any
import torch
import enum
from torch._C import _from_dlpack
from torch._C import _to_dlpack as to_dlpack
__all__ = [
"DLDeviceType",
"from_dlpack",
"to_dlpack",
]
class DLDeviceType(enum.IntEnum):
# Enums as in DLPack specification (aten/src/ATen/dlpack.h)
kDLCPU = 1,
kDLGPU = 2,
kDLCPUPinned = 3,
kDLOpenCL = 4,
kDLVulkan = 7,
kDLMetal = 8,
kDLVPI = 9,
kDLROCM = 10,
kDLExtDev = 12,
kDLOneAPI = 14,
torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule
Returns an opaque object (a "DLPack capsule") representing the tensor.
.. note::
``to_dlpack`` is a legacy DLPack interface. The capsule it returns
cannot be used for anything in Python other than as input to
``from_dlpack``. The more idiomatic use of DLPack is to call
``from_dlpack`` directly on the tensor object - this works when that
object has a ``__dlpack__`` method, which PyTorch and most other
libraries indeed have now.
.. warning::
Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
Behavior when a capsule is consumed multiple times is undefined.
Args:
tensor: a tensor to be exported
The DLPack capsule shares the tensor's memory.
""")
# TODO: add a typing.Protocol to be able to tell Mypy that only objects with
# __dlpack__ and __dlpack_device__ methods are accepted.
def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
"""from_dlpack(ext_tensor) -> Tensor
Converts a tensor from an external library into a ``torch.Tensor``.
The returned PyTorch tensor will share the memory with the input tensor
(which may have come from another library). Note that in-place operations
will therefore also affect the data of the input tensor. This may lead to
unexpected issues (e.g., other libraries may have read-only flags or
immutable data structures), so the user should only do this if they know
for sure that this is fine.
Args:
ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
The tensor or DLPack capsule to convert.
If ``ext_tensor`` is a tensor (or ndarray) object, it must support
the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
an opaque ``PyCapsule`` instance, typically produced by a
``to_dlpack`` function or method.
Examples::
>>> import torch.utils.dlpack
>>> t = torch.arange(4)
# Convert a tensor directly (supported in PyTorch >= 1.10)
>>> t2 = torch.from_dlpack(t)
>>> t2[:2] = -1 # show that memory is shared
>>> t2
tensor([-1, -1, 2, 3])
>>> t
tensor([-1, -1, 2, 3])
# The old-style DLPack usage, with an intermediate capsule object
>>> capsule = torch.utils.dlpack.to_dlpack(t)
>>> capsule
<capsule object "dltensor" at ...>
>>> t3 = torch.from_dlpack(capsule)
>>> t3
tensor([-1, -1, 2, 3])
>>> t3[0] = -9 # now we're sharing memory between 3 tensors
>>> t3
tensor([-9, -1, 2, 3])
>>> t2
tensor([-9, -1, 2, 3])
>>> t
tensor([-9, -1, 2, 3])
"""
if hasattr(ext_tensor, '__dlpack__'):
device = ext_tensor.__dlpack_device__()
# device is either CUDA or ROCm, we need to pass the current
# stream
if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM):
stream = torch.cuda.current_stream(f'cuda:{device[1]}')
# cuda_stream is the pointer to the stream and it is a public
# attribute, but it is not documented
# The array API specifies that the default legacy stream must be passed
# with a value of 1 for CUDA
# https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none
is_cuda = device[0] == DLDeviceType.kDLGPU
# Since PyTorch is not using PTDS by default, let's directly pass
# the legacy stream
stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream
dlpack = ext_tensor.__dlpack__(stream=stream_ptr)
else:
dlpack = ext_tensor.__dlpack__()
else:
# Old versions just call the converter
dlpack = ext_tensor
return _from_dlpack(dlpack)
|
from typing import Any
import torch
import enum
from torch._C import _to_dlpack as to_dlpack
__all__ = [
"DLDeviceType",
"from_dlpack",
]
class DLDeviceType(enum.IntEnum):
# Enums as in DLPack specification (aten/src/ATen/dlpack.h)
kDLCPU = 1,
kDLCUDA = 2,
kDLCUDAHost = 3,
kDLOpenCL = 4,
kDLVulkan = 7,
kDLMetal = 8,
kDLVPI = 9,
kDLROCM = 10,
kDLROCMHost = 11,
kDLExtDev = 12,
kDLCUDAManaged = 13,
kDLOneAPI = 14,
kDLWebGPU = 15,
kDLHexagon = 16,
kDLMAIA = 17,
torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule
Returns an opaque object (a "DLPack capsule") representing the tensor.
.. note::
``to_dlpack`` is a legacy DLPack interface. The capsule it returns
cannot be used for anything in Python other than as input to
``from_dlpack``. The more idiomatic use of DLPack is to call
``from_dlpack`` directly on the tensor object - this works when that
object has a ``__dlpack__`` method, which PyTorch and most other
libraries indeed have now.
.. warning::
Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
Behavior when a capsule is consumed multiple times is undefined.
Args:
tensor: a tensor to be exported
The DLPack capsule shares the tensor's memory.
""")
# TODO: add a typing.Protocol to be able to tell Mypy that only objects with
# __dlpack__ and __dlpack_device__ methods are accepted.
def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
"""from_dlpack(ext_tensor) -> Tensor
Converts a tensor from an external library into a ``torch.Tensor``.
The returned PyTorch tensor will share the memory with the input tensor
(which may have come from another library). Note that in-place operations
will therefore also affect the data of the input tensor. This may lead to
unexpected issues (e.g., other libraries may have read-only flags or
immutable data structures), so the user should only do this if they know
for sure that this is fine.
Args:
ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
The tensor or DLPack capsule to convert.
If ``ext_tensor`` is a tensor (or ndarray) object, it must support
the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
an opaque ``PyCapsule`` instance, typically produced by a
``to_dlpack`` function or method.
Examples::
>>> import torch.utils.dlpack
>>> t = torch.arange(4)
# Convert a tensor directly (supported in PyTorch >= 1.10)
>>> t2 = torch.from_dlpack(t)
>>> t2[:2] = -1 # show that memory is shared
>>> t2
tensor([-1, -1, 2, 3])
>>> t
tensor([-1, -1, 2, 3])
# The old-style DLPack usage, with an intermediate capsule object
>>> capsule = torch.utils.dlpack.to_dlpack(t)
>>> capsule
<capsule object "dltensor" at ...>
>>> t3 = torch.from_dlpack(capsule)
>>> t3
tensor([-1, -1, 2, 3])
>>> t3[0] = -9 # now we're sharing memory between 3 tensors
>>> t3
tensor([-9, -1, 2, 3])
>>> t2
tensor([-9, -1, 2, 3])
>>> t
tensor([-9, -1, 2, 3])
"""
if hasattr(ext_tensor, '__dlpack__'):
kwargs: dict[str, Any] = {}
kwargs["max_version"] = (1, 0)
device = ext_tensor.__dlpack_device__()
        # if the device is CUDA or ROCm, we need to pass the current
        # stream
if device[0] in (DLDeviceType.kDLCUDA, DLDeviceType.kDLROCM):
stream = torch.cuda.current_stream(f'cuda:{device[1]}')
# cuda_stream is the pointer to the stream and it is a public
# attribute, but it is not documented
            # The array API specifies that the default legacy stream must be passed
# with a value of 1 for CUDA
# https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none
is_cuda = device[0] == DLDeviceType.kDLCUDA
            # Since PyTorch is not using PTDS by default, let's directly pass
            # the legacy stream
stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream
kwargs["stream"] = stream_ptr
try:
# Try running __dlpack__ while specifying `max_version` argument.
dlpack = ext_tensor.__dlpack__(**kwargs)
except TypeError:
# If that doesn't work, try removing the `max_version` argument.
kwargs.pop("max_version")
dlpack = ext_tensor.__dlpack__(**kwargs)
else:
# Old versions just call the converter
dlpack = ext_tensor
return torch._C._from_dlpack(dlpack)
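# --- Illustrative sketch (added for this document, not part of the original module) ---
# A minimal legacy producer: it wraps a tensor and exposes ``__dlpack__`` without
# the ``max_version`` keyword. ``from_dlpack`` above first calls
# ``__dlpack__(max_version=(1, 0))``; the resulting ``TypeError`` is caught and the
# call is retried without ``max_version``, so producers like this one keep working.
class _LegacyDLPackProducer:
    def __init__(self, tensor: 'torch.Tensor') -> None:
        self._tensor = tensor
    def __dlpack_device__(self):
        return self._tensor.__dlpack_device__()
    def __dlpack__(self, stream=None):  # note: no ``max_version`` parameter
        return self._tensor.__dlpack__(stream=stream)
# e.g. ``from_dlpack(_LegacyDLPackProducer(torch.arange(3)))`` shares memory with
# the wrapped tensor, exactly as in the docstring examples above.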
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
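# --- Illustrative sketch (added for this document, not part of the autogenerated table) ---
# Consumers such as setup.py typically look pinned specifiers up by package name,
# e.g. to assemble an extras list:
def deps_list(*pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]
# deps_list("numpy", "tqdm") -> ["numpy>=1.17", "tqdm>=4.27"]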
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import layer_normalization as layer_normalization
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
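# --- Illustrative note (added for this document, not part of the autogenerated file) ---
# Re-exports such as ``silu as swish`` and ``hard_silu as hard_swish`` simply
# register a second public name for the same callable:
if __name__ == "__main__":
    assert swish is silu and hard_swish is hard_silu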
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_any_doc_list_proto():
doc = AnyDoc(hello='world')
pt = DocList([doc]).to_protobuf()
docs = DocList.from_protobuf(pt)
assert docs[0].hello == 'world'
@pytest.mark.proto
def test_any_nested_doc_list_proto():
from docarray import BaseDoc, DocList
class TextDocWithId(BaseDoc):
id: str
text: str
class ResultTestDoc(BaseDoc):
matches: DocList[TextDocWithId]
index_da = DocList[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(10)]
)
out_da = DocList[ResultTestDoc]([ResultTestDoc(matches=index_da[0:2])])
pb = out_da.to_protobuf()
docs = DocList.from_protobuf(pb)
assert docs[0].matches[0].id == '0'
assert len(docs[0].matches) == 2
assert len(docs) == 1
@pytest.mark.proto
def test_union_type_error():
from typing import Union
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_protobuf(docs.to_protobuf())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_protobuf(docs_basic.to_protobuf())
assert docs_copy == docs_basic
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_any_doc_list_proto():
doc = AnyDoc(hello='world')
pt = DocList([doc]).to_protobuf()
docs = DocList.from_protobuf(pt)
assert docs[0].dict()['hello'] == 'world'
@pytest.mark.proto
def test_any_nested_doc_list_proto():
from docarray import BaseDoc, DocList
class TextDocWithId(BaseDoc):
id: str
text: str
class ResultTestDoc(BaseDoc):
matches: DocList[TextDocWithId]
index_da = DocList[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(10)]
)
out_da = DocList[ResultTestDoc]([ResultTestDoc(matches=index_da[0:2])])
pb = out_da.to_protobuf()
docs = DocList.from_protobuf(pb)
assert docs[0].matches[0].id == '0'
assert len(docs[0].matches) == 2
assert len(docs) == 1
@pytest.mark.proto
def test_union_type_error():
from typing import Union
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_protobuf(docs.to_protobuf())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_protobuf(docs_basic.to_protobuf())
assert docs_copy == docs_basic
|
_base_ = './solo_r50_fpn_1x_coco.py'
# model settings
model = dict(
mask_head=dict(
type='DecoupledSOLOHead',
num_classes=80,
in_channels=256,
stacked_convs=7,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
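# --- Illustrative note (added for this document; the config file name below is hypothetical) ---
# The five entries of ``num_grids``, ``scale_ranges`` and ``strides`` pair up
# one-to-one with the five feature levels. A merged config can be inspected with:
#   from mmengine.config import Config
#   cfg = Config.fromfile('decoupled-solo_r50_fpn_1x_coco.py')
#   print(cfg.model.mask_head.type)  # -> 'DecoupledSOLOHead'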
|
_base_ = [
'./solo_r50_fpn_1x_coco.py',
]
# model settings
model = dict(
mask_head=dict(
type='DecoupledSOLOHead',
num_classes=80,
in_channels=256,
stacked_convs=7,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
optimizer = dict(type='SGD', lr=0.01)
|
from __future__ import annotations
from .CSRLoss import CSRLoss
from .CSRReconstructionLoss import CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.FlopsLoss import FlopsLoss
from sentence_transformers.sparse_encoder.losses.SparseAnglELoss import SparseAnglELoss
from sentence_transformers.sparse_encoder.losses.SparseCachedGISTEmbedLoss import (
SparseCachedGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedMultipleNegativesRankingLoss import (
SparseCachedMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import (
SparseCoSENTLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCosineSimilarityLoss import (
SparseCosineSimilarityLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseDistillKLDivLoss import (
SparseDistillKLDivLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseGISTEmbedLoss import (
SparseGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMarginMSELoss import (
SparseMarginMSELoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMSELoss import SparseMSELoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseTripletLoss import (
SparseTripletLoss,
)
from sentence_transformers.sparse_encoder.losses.SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
try:
import torch_npu # noqa: F401
except Exception:
return False
return hasattr(torch, 'npu') and torch.npu.is_available()
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It is specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
if is_npu_available():
return 'npu'
elif is_cuda_available():
return 'cuda'
elif is_mlu_available():
return 'mlu'
elif is_mps_available():
return 'mps'
else:
return 'cpu'
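# --- Illustrative usage (added for this document, not part of the original module) ---
if __name__ == '__main__':
    print(f'selected device type: {get_device()}')
    if is_cuda_available():
        # peak CUDA memory (MB) allocated by tensors so far; the counter is
        # reset inside the call, so the next call starts counting from zero
        print(get_max_cuda_memory())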
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
It's specialized for mac m1 chips and require torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | mlu | mps | cpu.
"""
if is_cuda_available():
return 'cuda'
elif is_mlu_available():
return 'mlu'
elif is_mps_available():
return 'mps'
else:
return 'cpu'
|
import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batches, generator = peek_and_restore(generator)
self.generator = generator
self._first_batches = first_batches
self._output_signature = None
if not isinstance(first_batches[0], tuple):
raise ValueError(
"When passing a Python generator to a Keras model, "
"the generator must return a tuple, either "
"(input,) or (inputs, targets) or "
"(inputs, targets, sample_weights). "
f"Received: {first_batches[0]}"
)
def get_numpy_iterator(self):
return data_adapter_utils.get_numpy_iterator(self.generator)
def get_jax_iterator(self):
return data_adapter_utils.get_jax_iterator(self.generator)
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_tf(x, spec):
if data_adapter_utils.is_scipy_sparse(x):
x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
elif data_adapter_utils.is_jax_sparse(x):
x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
if not spec.shape.is_compatible_with(x.shape):
raise TypeError(
f"Generator yielded an element of shape {x.shape} where "
f"an element of shape {spec.shape} was expected. Your "
"generator provides tensors with variable input "
"dimensions other than the batch size. Make sure that the "
"generator's first two batches do not have the same "
"dimension value wherever there is a variable input "
"dimension."
)
return x
def get_tf_iterator():
for batch in self.generator:
batch = tree.map_structure(
convert_to_tf, batch, self._output_signature
)
yield batch
if self._output_signature is None:
self._output_signature = data_adapter_utils.get_tensor_spec(
self._first_batches
)
ds = tf.data.Dataset.from_generator(
get_tf_iterator,
output_signature=self._output_signature,
)
ds = ds.prefetch(tf.data.AUTOTUNE)
return ds
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self.generator)
@property
def num_batches(self):
return None
@property
def batch_size(self):
return None
def peek_and_restore(generator):
batches = list(
itertools.islice(
generator, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
)
)
return batches, itertools.chain(batches, generator)
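# --- Illustrative usage (added for this document, not part of the original module) ---
if __name__ == "__main__":
    import numpy as np
    def _gen():
        while True:
            x = np.random.rand(32, 4).astype("float32")
            y = np.random.rand(32, 1).astype("float32")
            yield x, y  # each batch must be a tuple, per the check in __init__
    adapter = GeneratorDataAdapter(_gen())
    first = next(iter(adapter.get_numpy_iterator()))
    print([part.shape for part in first])  # -> [(32, 4), (32, 1)]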
|
import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batches, generator = peek_and_restore(generator)
self.generator = generator
self._first_batches = first_batches
self._output_signature = None
if not isinstance(first_batches[0], tuple):
raise ValueError(
"When passing a Python generator to a Keras model, "
"the generator must return a tuple, either "
"(input,) or (inputs, targets) or "
"(inputs, targets, sample_weights). "
f"Received: {first_batches[0]}"
)
def get_numpy_iterator(self):
return data_adapter_utils.get_numpy_iterator(self.generator)
def get_jax_iterator(self):
from keras.src.backend.jax.core import convert_to_tensor
def convert_to_jax(x):
if data_adapter_utils.is_scipy_sparse(x):
return data_adapter_utils.scipy_sparse_to_jax_sparse(x)
elif data_adapter_utils.is_tensorflow_sparse(x):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
return convert_to_tensor(x)
for batch in self.generator:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_tf(x, spec):
if data_adapter_utils.is_scipy_sparse(x):
x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
elif data_adapter_utils.is_jax_sparse(x):
x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
if not spec.shape.is_compatible_with(x.shape):
raise TypeError(
f"Generator yielded an element of shape {x.shape} where "
f"an element of shape {spec.shape} was expected. Your "
"generator provides tensors with variable input "
"dimensions other than the batch size. Make sure that the "
"generator's first two batches do not have the same "
"dimension value wherever there is a variable input "
"dimension."
)
return x
def get_tf_iterator():
for batch in self.generator:
batch = tree.map_structure(
convert_to_tf, batch, self._output_signature
)
yield batch
if self._output_signature is None:
self._output_signature = data_adapter_utils.get_tensor_spec(
self._first_batches
)
ds = tf.data.Dataset.from_generator(
get_tf_iterator,
output_signature=self._output_signature,
)
ds = ds.prefetch(tf.data.AUTOTUNE)
return ds
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self.generator)
@property
def num_batches(self):
return None
@property
def batch_size(self):
return None
def peek_and_restore(generator):
batches = list(
itertools.islice(
generator, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
)
)
return batches, itertools.chain(batches, generator)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyCustomLoss, CrossEntropyLoss,
binary_cross_entropy, cross_entropy,
mask_cross_entropy)
from .ddq_detr_aux_loss import DDQAuxLoss
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalCustomLoss, FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, SIoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'SIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p',
'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss',
'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss',
'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss', 'EQLV2Loss',
'MarginL2Loss', 'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss',
'DDQAuxLoss', 'CrossEntropyCustomLoss', 'FocalCustomLoss'
]
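# --- Illustrative note (added for this document; assumes an MMDetection 3.x environment) ---
# The losses exported above are usually built from config dicts via the registry:
#   from mmdet.registry import MODELS
#   loss_cls = MODELS.build(
#       dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25))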
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .ddq_detr_aux_loss import DDQAuxLoss
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, SIoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'SIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p',
'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss',
'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss',
'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss', 'EQLV2Loss',
'MarginL2Loss', 'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss',
'DDQAuxLoss'
]
|
_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[512]))
|
_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
bbox_head=dict(in_channels=512))
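# Note (added for this document): ResNet-18 ends with a 512-channel stage,
# compared to 2048 channels for the ResNet-50 used in the base config, which is
# why the input channel count is overridden to 512 here.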
|
_base_ = './scnet_x101_64x4d_fpn_20e_coco.py'
data = dict(samples_per_gpu=1, workers_per_gpu=1)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = './scnet_x101_64x4d_fpn_20e_coco.py'
data = dict(samples_per_gpu=1, workers_per_gpu=1)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict
if TYPE_CHECKING: # pragma: no cover
import numpy as np
from docarray.typing import ArrayType
from docarray import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
filter: Optional[Dict] = None,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
on: Optional[str] = None,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can use other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use scipy metric, please set ``use_scipy=True``.
- To make all matches values in [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert the distance as score and make all values in range [0, 1],
            use ``dA.match(dB, normalization=(1, 0))``. Note how ``normalization`` differs from the previous.
        - If a custom metric distance is provided, make sure that it returns scores as distances and not similarity, meaning the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speedup the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param filter: filter query used for pre-filtering
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param on: specifies a subindex to search on. If set, the returned DocumentArray will be retrieved from the given subindex.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
filter=filter,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
on=on,
**kwargs,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
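# --- Illustrative usage (added for this document; assumes docarray v1 with in-memory storage) ---
if __name__ == '__main__':
    import numpy as np
    from docarray import Document, DocumentArray
    queries = DocumentArray([Document(embedding=np.random.rand(8)) for _ in range(3)])
    index = DocumentArray([Document(embedding=np.random.rand(8)) for _ in range(10)])
    # normalization=(1, 0) inverts distances into scores, as documented above
    queries.match(index, metric='cosine', limit=5, normalization=(1, 0))
    print(len(queries[0].matches))  # -> 5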
|
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict
if TYPE_CHECKING:
import numpy as np
from docarray.typing import ArrayType
from docarray import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
filter: Optional[Dict] = None,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
on: Optional[str] = None,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can use other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use scipy metric, please set ``use_scipy=True``.
- To make all matches values in [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert the distance as score and make all values in range [0, 1],
            use ``dA.match(dB, normalization=(1, 0))``. Note how ``normalization`` differs from the previous.
        - If a custom metric distance is provided, make sure that it returns scores as distances and not similarity, meaning the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speedup the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param filter: filter query used for pre-filtering
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param on: specifies a subindex to search on. If set, the returned DocumentArray will be retrieved from the given subindex.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
filter=filter,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
on=on,
**kwargs,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
|
from typing import Union, Optional, Iterable
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
from ...memory import DocumentArrayInMemory
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _insert_doc_at_idx(self, doc, idx: Optional[int] = None):
if idx is None:
idx = len(self)
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, idx),
)
self._offset2ids.insert(idx, doc.id)
def _shift_index_right_backward(self, start: int):
idx = len(self) - 1
while idx >= start:
self._sql(
f'UPDATE {self._table_name} SET item_order = ? WHERE item_order = ?',
(idx + 1, idx),
)
idx -= 1
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
"""
length = len(self)
if index < 0:
index = length + index
index = max(0, min(length, index))
self._shift_index_right_backward(index)
self._insert_doc_at_idx(doc=value, idx=index)
self._commit()
def append(self, doc: 'Document') -> None:
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, len(self)),
)
self._offset2ids.append(doc.id)
self._commit()
def __del__(self) -> None:
super().__del__()
if not self._persist:
self._sql(
'DELETE FROM metadata WHERE table_name=? AND container_type=?',
(self._table_name, self.__class__.__name__),
)
self._sql(f'DROP TABLE IF EXISTS {self._table_name}')
self._commit()
def __contains__(self, item: Union[str, 'Document']):
if isinstance(item, str):
r = self._sql(f'SELECT 1 FROM {self._table_name} WHERE doc_id=?', (item,))
return len(list(r)) > 0
elif isinstance(item, Document):
return item.id in self # fall back to str check
else:
return False
def __len__(self) -> int:
request = self._sql(f'SELECT COUNT(*) FROM {self._table_name}')
return request.fetchone()[0]
def __repr__(self):
return f'<DocumentArray[SQLite] (length={len(self)}) at {id(self)}>'
def __eq__(self, other):
"""In sqlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def extend(self, docs: Iterable['Document']) -> None:
self_len = len(self)
for doc in docs:
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, self_len),
)
self._offset2ids.append(doc.id)
self_len += 1
self._commit()
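# --- Illustrative usage (added for this document; assumes docarray v1 with the sqlite backend) ---
if __name__ == '__main__':
    from docarray import DocumentArray
    # 'connection' may also point to a file path instead of ':memory:'
    da = DocumentArray(storage='sqlite', config={'connection': ':memory:'})
    da.extend([Document(text='a'), Document(text='b')])
    da.insert(0, Document(text='first'))
    print(len(da), da[0].text)  # -> 3 first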
|
from typing import Union, Optional
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _insert_doc_at_idx(self, doc, idx: Optional[int] = None):
if idx is None:
idx = len(self)
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, idx),
)
self._offset2ids.insert(idx, doc.id)
def _shift_index_right_backward(self, start: int):
idx = len(self) - 1
while idx >= start:
self._sql(
f'UPDATE {self._table_name} SET item_order = ? WHERE item_order = ?',
(idx + 1, idx),
)
idx -= 1
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
        :param value: The doc to be inserted.
"""
length = len(self)
if index < 0:
index = length + index
index = max(0, min(length, index))
self._shift_index_right_backward(index)
self._insert_doc_at_idx(doc=value, idx=index)
self._commit()
def append(self, doc: 'Document') -> None:
self._sql(
f'INSERT INTO {self._table_name} (doc_id, serialized_value, item_order) VALUES (?, ?, ?)',
(doc.id, doc, len(self)),
)
self._offset2ids.append(doc.id)
self._commit()
def __del__(self) -> None:
super().__del__()
if not self._persist:
self._sql(
'DELETE FROM metadata WHERE table_name=? AND container_type=?',
(self._table_name, self.__class__.__name__),
)
self._sql(f'DROP TABLE IF EXISTS {self._table_name}')
self._commit()
def __contains__(self, item: Union[str, 'Document']):
if isinstance(item, str):
r = self._sql(f'SELECT 1 FROM {self._table_name} WHERE doc_id=?', (item,))
return len(list(r)) > 0
elif isinstance(item, Document):
return item.id in self # fall back to str check
else:
return False
def __len__(self) -> int:
r = self._sql(f'SELECT COUNT(*) FROM {self._table_name}')
return r.fetchone()[0]
def __repr__(self):
return f'<DocumentArray[SQLite] (length={len(self)}) at {id(self)}>'
def __eq__(self, other):
"""In sqlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
|
from docarray import Document, DocumentArray
import pytest
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_add_ignore_existing_doc_id(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_add_ignore_existing_doc_id',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
]
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[2, 2, 2]),
Document(id='r6', embedding=[4, 4, 4]),
]
)
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc) == len(elastic_doc[:, 'embedding'])
assert len(elastic_doc) == indexed_offset_count
assert len(elastic_doc[:, 'embedding']) == 7
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_add_skip_wrong_data_type_and_fix_offset(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'index_name': 'test_add_skip_wrong_data_type_and_fix_offset',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=1000),
Document(id='1', price=20000),
Document(id='2', price=103000),
]
)
with pytest.raises(IndexError):
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=10000),
Document(id='1', price=20000),
Document(id='3', price=30000),
Document(id='4', price=100000000000), # overflow int32
Document(id='5', price=2000),
Document(id='6', price=100000000000), # overflow int32
Document(id='7', price=30000),
]
)
expected_ids = ['0', '1', '2', '3', '5', '7']
assert len(elastic_doc) == 6
assert len(elastic_doc[:, 'id']) == 6
assert elastic_doc[:, 'id'] == expected_ids
assert elastic_doc._offset2ids.ids == expected_ids
|
from docarray import Document, DocumentArray
import pytest
def test_add_ignore_existing_doc_id(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_add_ignore_existing_doc_id',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
]
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[2, 2, 2]),
Document(id='r6', embedding=[4, 4, 4]),
]
)
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc) == len(elastic_doc[:, 'embedding'])
assert len(elastic_doc) == indexed_offset_count
assert len(elastic_doc[:, 'embedding']) == 7
def test_add_skip_wrong_data_type_and_fix_offset(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'index_name': 'test_add_skip_wrong_data_type_and_fix_offset',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=1000),
Document(id='1', price=20000),
Document(id='2', price=103000),
]
)
with pytest.raises(IndexError):
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=10000),
Document(id='1', price=20000),
Document(id='3', price=30000),
Document(id='4', price=100000000000), # overflow int32
Document(id='5', price=2000),
Document(id='6', price=100000000000), # overflow int32
Document(id='7', price=30000),
]
)
expected_ids = ['0', '1', '2', '3', '5', '7']
assert len(elastic_doc) == 6
assert len(elastic_doc[:, 'id']) == 6
assert elastic_doc[:, 'id'] == expected_ids
assert elastic_doc._offset2ids.ids == expected_ids
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
DTypePolicyMap,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...DTypePolicy'>
You can also specify `config` of the dtype policy to this function by
passing dict containing `class_name` and `config` as an identifier. Also
note that the `class_name` must map to a `DTypePolicy` class
>>> identifier = {"class_name": "DTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...DTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None` or string name of a
`DTypePolicy` or `DTypePolicy` configuration dictionary or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, DTypePolicy):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return DTypePolicy(identifier)
try:
return DTypePolicy(backend.standardize_dtype(identifier))
except:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
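# --- Illustrative usage (added for this document, not part of the original module) ---
if __name__ == "__main__":
    policy = get("mixed_bfloat16")
    config = serialize(policy)   # a serialization dict with "class_name" and "config"
    restored = deserialize(config)
    print(type(restored).__name__, restored.name)  # e.g. DTypePolicy mixed_bfloat16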
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
DTypePolicyMap,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...FloatDTypePolicy'>
    You can also specify the `config` of the dtype policy to this function by
    passing a dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to a `DTypePolicy` class.
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...FloatDTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None` or string name of a
`DTypePolicy` or `DTypePolicy` configuration dictionary or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, DTypePolicy):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
    except Exception:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
|
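For context, a minimal round trip through the `get`/`serialize`/`deserialize` helpers defined above might look like the sketch below; it assumes a Keras 3 environment where `keras.dtype_policies` exposes these exported functions.
import keras

# Resolve a policy from its string name, then round-trip it through serialize/deserialize.
policy = keras.dtype_policies.get("mixed_bfloat16")
config = keras.dtype_policies.serialize(policy)
restored = keras.dtype_policies.deserialize(config)
assert restored.name == policy.name  # e.g. "mixed_bfloat16"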
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(Image, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(Image, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(Image, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
def test_image_shortcut_doc():
class MyDoc(BaseDocument):
image: Image
image2: Image
image3: Image
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = Image(url=REMOTE_JPG)
img.bytes = img.url.load_bytes()
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = Image(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes = img.tensor.to_bytes()
assert isinstance(img.bytes, bytes)
assert len(img.bytes) > 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(Image, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(Image, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(Image, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
def test_image_shortcut_doc():
class MyDoc(BaseDocument):
image: Image
image2: Image
image3: Image
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseAnglELoss import SparseAnglELoss
from sentence_transformers.sparse_encoder.losses.SparseCachedGISTEmbedLoss import (
SparseCachedGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedMultipleNegativesRankingLoss import (
SparseCachedMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import (
SparseCoSENTLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCosineSimilarityLoss import (
SparseCosineSimilarityLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseGISTEmbedLoss import (
SparseGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMarginMSELoss import (
SparseMarginMSELoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMSELoss import SparseMSELoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseTripletLoss import (
SparseTripletLoss,
)
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedGISTEmbedLoss import (
SparseCachedGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedMultipleNegativesRankingLoss import (
SparseCachedMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import (
SparseCoSENTLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCosineSimilarityLoss import (
SparseCosineSimilarityLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseGISTEmbedLoss import (
SparseGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMarginMSELoss import (
SparseMarginMSELoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseTripletLoss import (
SparseTripletLoss,
)
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
]
|
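As a rough usage sketch only: wiring one of the exported losses to a sparse encoder might look as follows. The `SparseEncoder` class, the checkpoint name, and the `model=` constructor argument are assumptions modeled on the dense Sentence Transformers losses, not something stated above.
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss

# Assumed: a SPLADE-style checkpoint that SparseEncoder can load.
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Assumed signature: like its dense counterpart, the loss is constructed from the model to train.
loss = SparseMultipleNegativesRankingLoss(model=model)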
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"TimmWrapperConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
"GraniteConfig",
"GraniteMoeConfig",
"GraniteMoeHybridConfig",
"Qwen3MoeConfig",
"GraniteSpeechConfig",
}
def get_checkpoint_from_config_class(config_class):
checkpoint = None
# source code of `config_class`
config_source = inspect.getsource(config_class)
checkpoints = _re_checkpoint.findall(config_source)
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('google-bert/bert-base-uncased', 'https://huggingface.co/google-bert/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/"):
ckpt_link = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
checkpoint = ckpt_name
break
return checkpoint
def check_config_docstrings_have_checkpoints():
configs_without_checkpoint = []
for config_class in list(CONFIG_MAPPING.values()):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
checkpoint = get_checkpoint_from_config_class(config_class)
name = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(name)
if len(configs_without_checkpoint) > 0:
message = "\n".join(sorted(configs_without_checkpoint))
raise ValueError(
f"The following configurations don't contain any valid checkpoint:\n{message}\n\n"
"The requirement is to include a link pointing to one of the models of this architecture in the "
"docstring of the config classes listed above. The link should have be a markdown format like "
"[myorg/mymodel](https://huggingface.co/myorg/mymodel)."
)
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"TimmWrapperConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
"GraniteConfig",
"GraniteMoeConfig",
"Qwen3MoeConfig",
"GraniteSpeechConfig",
}
def get_checkpoint_from_config_class(config_class):
checkpoint = None
# source code of `config_class`
config_source = inspect.getsource(config_class)
checkpoints = _re_checkpoint.findall(config_source)
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('google-bert/bert-base-uncased', 'https://huggingface.co/google-bert/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/"):
ckpt_link = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
checkpoint = ckpt_name
break
return checkpoint
def check_config_docstrings_have_checkpoints():
configs_without_checkpoint = []
for config_class in list(CONFIG_MAPPING.values()):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
checkpoint = get_checkpoint_from_config_class(config_class)
name = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(name)
if len(configs_without_checkpoint) > 0:
message = "\n".join(sorted(configs_without_checkpoint))
raise ValueError(
f"The following configurations don't contain any valid checkpoint:\n{message}\n\n"
"The requirement is to include a link pointing to one of the models of this architecture in the "
"docstring of the config classes listed above. The link should have be a markdown format like "
"[myorg/mymodel](https://huggingface.co/myorg/mymodel)."
)
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
|
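To make the checkpoint-matching behaviour concrete, the following stdlib-only sketch exercises the same regex on a docstring-like string (the sample text is made up for illustration):
import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

docstring = (
    "Instantiating a configuration with the defaults will yield a configuration similar to "
    "that of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased)."
)
# findall returns (checkpoint name, checkpoint link) tuples, exactly what the loop above consumes.
print(_re_checkpoint.findall(docstring))
# [('google-bert/bert-base-uncased', 'https://huggingface.co/google-bert/bert-base-uncased')]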
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLOv2(SingleStageInstanceSegmentor):
"""`SOLOv2: Dynamic and Fast Instance Segmentation
<https://arxiv.org/abs/2003.10152>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLOv2(SingleStageInstanceSegmentor):
"""`SOLOv2: Dynamic and Fast Instance Segmentation
<https://arxiv.org/abs/2003.10152>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SOLOV2Head
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
rng = np.random.RandomState(0)
masks = np.zeros((num_items, img_h, img_w))
for i, bbox in enumerate(bboxes):
bbox = bbox.astype(np.int32)
mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
0.3).astype(np.int64)
masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
return BitmapMasks(masks, height=img_h, width=img_w)
def _fake_mask_feature_head():
mask_feature_head = ConfigDict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
return mask_feature_head
class TestSOLOv2Head(TestCase):
def test_solov2_head_loss(self):
"""Tests mask head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}]
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
        # SOLO head expects multiple levels of features per image
feats = []
for i in range(len(mask_head.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
feats = tuple(feats)
mask_outs = mask_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty(0, 4)
gt_instances.labels = torch.LongTensor([])
gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
empty_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_mask_loss = empty_gt_losses['loss_mask']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_mask_loss.item(), 0,
            'there should be no mask loss when there are no true masks')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
one_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_mask_loss = one_gt_losses['loss_mask']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_mask_loss.item(), 0,
'mask loss should be non-zero')
def test_solov2_head_empty_result(self):
s = 256
img_metas = {
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
kernel_preds = torch.empty(0, 128)
cls_scores = torch.empty(0, 80)
mask_feats = torch.empty(0, 16, 16)
test_cfg = ConfigDict(
score_thr=0.1,
mask_thr=0.5,
)
results = mask_head._predict_by_feat_single(
kernel_preds=kernel_preds,
cls_scores=cls_scores,
mask_feats=mask_feats,
img_meta=img_metas,
cfg=test_cfg)
self.assertIsInstance(results, InstanceData)
self.assertEqual(len(results), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SOLOV2Head
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
rng = np.random.RandomState(0)
masks = np.zeros((num_items, img_h, img_w))
for i, bbox in enumerate(bboxes):
bbox = bbox.astype(np.int32)
mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
0.3).astype(np.int)
masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
return BitmapMasks(masks, height=img_h, width=img_w)
def _fake_mask_feature_head():
mask_feature_head = ConfigDict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
return mask_feature_head
class TestSOLOv2Head(TestCase):
def test_solov2_head_loss(self):
"""Tests mask head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}]
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
        # SOLO head expects multiple levels of features per image
feats = []
for i in range(len(mask_head.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
feats = tuple(feats)
mask_outs = mask_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty(0, 4)
gt_instances.labels = torch.LongTensor([])
gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
empty_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_mask_loss = empty_gt_losses['loss_mask']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_mask_loss.item(), 0,
            'there should be no mask loss when there are no true masks')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
one_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_mask_loss = one_gt_losses['loss_mask']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_mask_loss.item(), 0,
'mask loss should be non-zero')
def test_solov2_head_empty_result(self):
s = 256
img_metas = {
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
kernel_preds = torch.empty(0, 128)
cls_scores = torch.empty(0, 80)
mask_feats = torch.empty(0, 16, 16)
test_cfg = ConfigDict(
score_thr=0.1,
mask_thr=0.5,
)
results = mask_head._predict_by_feat_single(
kernel_preds=kernel_preds,
cls_scores=cls_scores,
mask_feats=mask_feats,
img_meta=img_metas,
cfg=test_cfg)
self.assertIsInstance(results, InstanceData)
self.assertEqual(len(results), 0)
|
from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
class AzureAiServicesTextAnalyticsForHealthTool(BaseTool):
"""Tool that queries the Azure AI Services Text Analytics for Health API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/ai-services/language-service/text-analytics-for-health/quickstart?pivots=programming-language-python
"""
azure_ai_services_key: str = "" #: :meta private:
azure_ai_services_endpoint: str = "" #: :meta private:
text_analytics_client: Any #: :meta private:
name: str = "azure_ai_services_text_analytics_for_health"
description: str = (
"A wrapper around Azure AI Services Text Analytics for Health. "
"Useful for when you need to identify entities in healthcare data. "
"Input should be text."
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and endpoint exists in environment."""
azure_ai_services_key = get_from_dict_or_env(
values, "azure_ai_services_key", "AZURE_AI_SERVICES_KEY"
)
azure_ai_services_endpoint = get_from_dict_or_env(
values, "azure_ai_services_endpoint", "AZURE_AI_SERVICES_ENDPOINT"
)
try:
import azure.ai.textanalytics as sdk
from azure.core.credentials import AzureKeyCredential
values["text_analytics_client"] = sdk.TextAnalyticsClient(
endpoint=azure_ai_services_endpoint,
credential=AzureKeyCredential(azure_ai_services_key),
)
except ImportError:
raise ImportError(
"azure-ai-textanalytics is not installed. "
"Run `pip install azure-ai-textanalytics` to install."
)
return values
def _text_analysis(self, text: str) -> Dict:
poller = self.text_analytics_client.begin_analyze_healthcare_entities(
[{"id": "1", "language": "en", "text": text}]
)
result = poller.result()
res_dict = {}
docs = [doc for doc in result if not doc.is_error]
if docs is not None:
res_dict["entities"] = [
f"{x.text} is a healthcare entity of type {x.category}"
for y in docs
for x in y.entities
]
return res_dict
def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
formatted_result = []
if "entities" in text_analysis_result:
formatted_result.append(
f"""The text contains the following healthcare entities: {
", ".join(text_analysis_result["entities"])
}""".replace("\n", " ")
)
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
text_analysis_result = self._text_analysis(query)
return self._format_text_analysis_result(text_analysis_result)
except Exception as e:
raise RuntimeError(
f"Error while running AzureAiServicesTextAnalyticsForHealthTool: {e}"
)
|
from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
class AzureAiServicesTextAnalyticsForHealthTool(BaseTool): # type: ignore[override]
"""Tool that queries the Azure AI Services Text Analytics for Health API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/ai-services/language-service/text-analytics-for-health/quickstart?pivots=programming-language-python
"""
azure_ai_services_key: str = "" #: :meta private:
azure_ai_services_endpoint: str = "" #: :meta private:
text_analytics_client: Any #: :meta private:
name: str = "azure_ai_services_text_analytics_for_health"
description: str = (
"A wrapper around Azure AI Services Text Analytics for Health. "
"Useful for when you need to identify entities in healthcare data. "
"Input should be text."
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and endpoint exists in environment."""
azure_ai_services_key = get_from_dict_or_env(
values, "azure_ai_services_key", "AZURE_AI_SERVICES_KEY"
)
azure_ai_services_endpoint = get_from_dict_or_env(
values, "azure_ai_services_endpoint", "AZURE_AI_SERVICES_ENDPOINT"
)
try:
import azure.ai.textanalytics as sdk
from azure.core.credentials import AzureKeyCredential
values["text_analytics_client"] = sdk.TextAnalyticsClient(
endpoint=azure_ai_services_endpoint,
credential=AzureKeyCredential(azure_ai_services_key),
)
except ImportError:
raise ImportError(
"azure-ai-textanalytics is not installed. "
"Run `pip install azure-ai-textanalytics` to install."
)
return values
def _text_analysis(self, text: str) -> Dict:
poller = self.text_analytics_client.begin_analyze_healthcare_entities(
[{"id": "1", "language": "en", "text": text}]
)
result = poller.result()
res_dict = {}
docs = [doc for doc in result if not doc.is_error]
if docs is not None:
res_dict["entities"] = [
f"{x.text} is a healthcare entity of type {x.category}"
for y in docs
for x in y.entities
]
return res_dict
def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
formatted_result = []
if "entities" in text_analysis_result:
formatted_result.append(
f"""The text contains the following healthcare entities: {
", ".join(text_analysis_result["entities"])
}""".replace("\n", " ")
)
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
text_analysis_result = self._text_analysis(query)
return self._format_text_analysis_result(text_analysis_result)
except Exception as e:
raise RuntimeError(
f"Error while running AzureAiServicesTextAnalyticsForHealthTool: {e}"
)
|
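A rough usage sketch for the tool above; the import path, the placeholder credentials, and the sample sentence are assumptions, and running it needs `azure-ai-textanalytics` plus a real Azure Language resource.
import os

# Placeholders -- substitute a real key and endpoint before running.
os.environ["AZURE_AI_SERVICES_KEY"] = "<your-key>"
os.environ["AZURE_AI_SERVICES_ENDPOINT"] = "https://<your-resource>.cognitiveservices.azure.com/"

# Assumed import path for the class defined above.
from langchain_community.tools.azure_ai_services import (
    AzureAiServicesTextAnalyticsForHealthTool,
)

tool = AzureAiServicesTextAnalyticsForHealthTool()
print(tool.run("The patient was prescribed 100mg of ibuprofen twice daily."))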
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
    :class:`LaserEncoder` is an encoder based on Facebook Research's LASER
(Language-Agnostic SEntence Representations) to compute multilingual
sentence embeddings: https://github.com/facebookresearch/LASER
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
language: str = 'en',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE.``
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
        :param language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param batch_size: size of each batch
:param device: Device string ('cpu'/'cuda'/'cuda:2')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.device = device
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.language = language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': self.device == 'cpu'},
)
self.device = torch.device(device)
self.model.bpeSentenceEmbedding.encoder.encoder.to(self.device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_path``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
language = parameters.get('language', self.language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
    :class:`LaserEncoder` is an encoder based on Facebook Research's LASER
(Language-Agnostic SEntence Representations) to compute multilingual
sentence embeddings: https://github.com/facebookresearch/LASER
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
default_language: str = 'en',
default_batch_size: int = 32,
default_traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE.``
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
        :param default_language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param default_batch_size: size of each batch
:param default_traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param device: Device string ('cpu'/'cuda'/'cuda:2')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.device = device
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.default_language = default_language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': self.device == 'cpu'},
)
self.device = torch.device(device)
self.model.bpeSentenceEmbedding.encoder.encoder.to(self.device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_path``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
language = parameters.get('language', self.default_language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
|
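Purely as an illustrative sketch (the Flow wiring below assumes a jina 2.x-style API and that the `LaserEncoder` class above is available in the current module; none of this is part of the executor itself):
from jina import Document, DocumentArray, Flow

# Assumes LaserEncoder (defined above) is importable/defined in the current module.
f = Flow().add(uses=LaserEncoder)

with f:
    docs = DocumentArray([Document(text='hello world'), Document(text='hallo welt')])
    f.post(on='/encode', inputs=docs, parameters={'language': 'en'})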
"""Tests for training continuation."""
import json
from typing import Any, Dict, TypeVar
import numpy as np
import pytest
import xgboost as xgb
# pylint: disable=too-many-locals
def run_training_continuation_model_output(device: str, tree_method: str) -> None:
"""Run training continuation test."""
datasets = pytest.importorskip("sklearn.datasets")
n_samples = 64
n_features = 32
X, y = datasets.make_regression(n_samples, n_features, random_state=1)
dtrain = xgb.DMatrix(X, y)
params = {
"tree_method": tree_method,
"max_depth": "2",
"gamma": "0.1",
"alpha": "0.01",
"device": device,
}
bst_0 = xgb.train(params, dtrain, num_boost_round=64)
dump_0 = bst_0.get_dump(dump_format="json")
bst_1 = xgb.train(params, dtrain, num_boost_round=32)
bst_1 = xgb.train(params, dtrain, num_boost_round=32, xgb_model=bst_1)
dump_1 = bst_1.get_dump(dump_format="json")
T = TypeVar("T", Dict[str, Any], float, str, int, list)
def recursive_compare(obj_0: T, obj_1: T) -> None:
if isinstance(obj_0, float):
assert np.isclose(obj_0, obj_1, atol=1e-6)
elif isinstance(obj_0, str):
assert obj_0 == obj_1
elif isinstance(obj_0, int):
assert obj_0 == obj_1
elif isinstance(obj_0, dict):
for i in range(len(obj_0.items())):
assert list(obj_0.keys())[i] == list(obj_1.keys())[i]
if list(obj_0.keys())[i] != "missing":
recursive_compare(list(obj_0.values()), list(obj_1.values()))
else:
for i, lhs in enumerate(obj_0):
rhs = obj_1[i]
recursive_compare(lhs, rhs)
assert len(dump_0) == len(dump_1)
for i, lhs in enumerate(dump_0):
obj_0 = json.loads(lhs)
obj_1 = json.loads(dump_1[i])
recursive_compare(obj_0, obj_1)
|
"""Tests for training continuation."""
import json
from typing import Any, Dict, TypeVar
import numpy as np
import pytest
import xgboost as xgb
# pylint: disable=too-many-locals
def run_training_continuation_model_output(device: str, tree_method: str) -> None:
"""Run training continuation test."""
datasets = pytest.importorskip("sklearn.datasets")
n_samples = 64
n_features = 32
X, y = datasets.make_regression(n_samples, n_features, random_state=1)
dtrain = xgb.DMatrix(X, y)
params = {
"tree_method": tree_method,
"max_depth": "2",
"gamma": "0.1",
"alpha": "0.01",
"device": device,
}
bst_0 = xgb.train(params, dtrain, num_boost_round=64)
dump_0 = bst_0.get_dump(dump_format="json")
bst_1 = xgb.train(params, dtrain, num_boost_round=32)
bst_1 = xgb.train(params, dtrain, num_boost_round=32, xgb_model=bst_1)
dump_1 = bst_1.get_dump(dump_format="json")
T = TypeVar("T", Dict[str, Any], float, str, int, list)
def recursive_compare(obj_0: T, obj_1: T) -> None:
if isinstance(obj_0, float):
assert np.isclose(obj_0, obj_1, atol=1e-6)
elif isinstance(obj_0, str):
assert obj_0 == obj_1
elif isinstance(obj_0, int):
assert obj_0 == obj_1
elif isinstance(obj_0, dict):
for i in range(len(obj_0.items())):
assert list(obj_0.keys())[i] == list(obj_1.keys())[i]
if list(obj_0.keys())[i] != "missing":
recursive_compare(list(obj_0.values()), list(obj_1.values()))
else:
for i, lhs in enumerate(obj_0):
rhs = obj_1[i]
recursive_compare(lhs, rhs)
assert len(dump_0) == len(dump_1)
for i, lhs in enumerate(dump_0):
obj_0 = json.loads(lhs)
obj_1 = json.loads(dump_1[i])
recursive_compare(obj_0, obj_1)
|
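The continuation mechanics exercised by the test above come down to passing an existing booster via `xgb_model`; a minimal standalone sketch (assuming `xgboost` and `scikit-learn` are installed) is:
import xgboost as xgb
from sklearn.datasets import make_regression

X, y = make_regression(n_samples=64, n_features=32, random_state=1)
dtrain = xgb.DMatrix(X, y)
params = {"tree_method": "hist", "max_depth": 2}

# 64 rounds in one go versus 32 rounds followed by 32 continued rounds.
bst_full = xgb.train(params, dtrain, num_boost_round=64)
bst_half = xgb.train(params, dtrain, num_boost_round=32)
bst_cont = xgb.train(params, dtrain, num_boost_round=32, xgb_model=bst_half)

assert bst_full.num_boosted_rounds() == bst_cont.num_boosted_rounds() == 64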
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.utils import ResLayer as _ResLayer
from mmdet.registry import MODELS
@MODELS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is a deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
@auto_fp16()
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is a deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
@auto_fp16()
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
|
""" Official evaluation script for v1.1 of the SQuAD dataset. """
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
print(message, file=sys.stderr)
continue
ground_truths = [x["text"] for x in qa["answers"]]
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
if __name__ == "__main__":
expected_version = "1.1"
parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version)
parser.add_argument("dataset_file", help="Dataset file")
parser.add_argument("prediction_file", help="Prediction File")
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json["version"] != expected_version:
print(
"Evaluation expects v-" + expected_version + ", but got dataset with v-" + dataset_json["version"],
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
|
""" Official evaluation script for v1.1 of the SQuAD dataset. """
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x["text"], qa["answers"]))
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
if __name__ == "__main__":
expected_version = "1.1"
parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version)
parser.add_argument("dataset_file", help="Dataset file")
parser.add_argument("prediction_file", help="Prediction File")
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json["version"] != expected_version:
print(
"Evaluation expects v-" + expected_version + ", but got dataset with v-" + dataset_json["version"],
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
|
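A small worked example of the scoring helpers above, assuming the script has been saved locally under a hypothetical name such as `evaluate_v11.py` so the functions are importable:
from evaluate_v11 import exact_match_score, f1_score, metric_max_over_ground_truths

prediction = "the Eiffel Tower"
ground_truths = ["Eiffel Tower", "The Eiffel Tower in Paris"]

# normalize_answer strips articles, case and punctuation, so the first ground truth matches exactly.
print(metric_max_over_ground_truths(exact_match_score, prediction, ground_truths))  # True
# Token-level F1 against the best-matching ground truth.
print(metric_max_over_ground_truths(f1_score, prediction, ground_truths))  # 1.0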
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
from pathlib import Path
import pytest
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='session', autouse=True)
def create_model_weights():
path_to_model = os.path.join(TEST_DIR, 'model', 'model_state_dict.pth')
if not os.path.isfile(path_to_model):
os.system(f'python {os.path.join(TEST_DIR, "model", "external_model.py")}')
yield
if os.path.exists(path_to_model):
os.remove(path_to_model)
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='session', autouse=True)
def create_model_weights():
path_to_model = os.path.join(TEST_DIR, 'model', 'model_state_dict.pth')
if not os.path.isfile(path_to_model):
os.system(f'python {os.path.join(TEST_DIR, "model", "external_model.py")}')
yield
if os.path.exists(path_to_model):
os.remove(path_to_model)
|
"""
Wrapper script to run a command inside a Docker container
"""
import argparse
import grp
import itertools
import os
import pathlib
import pwd
import subprocess
import sys
import textwrap
OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent
PROJECT_ROOT_DIR = OPS_DIR.parent
LINEWIDTH = 88
TEXT_WRAPPER = textwrap.TextWrapper(
width=LINEWIDTH,
initial_indent="",
subsequent_indent=" ",
break_long_words=False,
break_on_hyphens=False,
)
def parse_run_args(*, raw_run_args: str) -> list[str]:
return [x for x in raw_run_args.split() if x]
def get_user_ids() -> dict[str, str]:
uid = os.getuid()
gid = os.getgid()
return {
"CI_BUILD_UID": str(uid),
"CI_BUILD_USER": pwd.getpwuid(uid).pw_name,
"CI_BUILD_GID": str(gid),
"CI_BUILD_GROUP": grp.getgrgid(gid).gr_name,
}
def fancy_print_cli_args(*, cli_args: list[str]) -> None:
print(
"=" * LINEWIDTH
+ "\n"
+ " \\\n".join(TEXT_WRAPPER.wrap(" ".join(cli_args)))
+ "\n"
+ "=" * LINEWIDTH
+ "\n",
flush=True,
)
def docker_run(
*,
image_uri: str,
command_args: list[str],
use_gpus: bool,
workdir: pathlib.Path,
user_ids: dict[str, str],
extra_args: list[str],
) -> None:
# Command-line arguments to be passed to `docker run`
docker_run_cli_args = ["--rm", "--pid=host"]
if use_gpus:
docker_run_cli_args.extend(["--gpus", "all"])
docker_run_cli_args.extend(["-v", f"{workdir}:/workspace", "-w", "/workspace"])
docker_run_cli_args.extend(
itertools.chain.from_iterable([["-e", f"{k}={v}"] for k, v in user_ids.items()])
)
docker_run_cli_args.extend(extra_args)
docker_run_cli_args.append(image_uri)
docker_run_cli_args.extend(command_args)
cli_args = ["docker", "run"] + docker_run_cli_args
fancy_print_cli_args(cli_args=cli_args)
subprocess.run(cli_args, check=True, encoding="utf-8")
def main(*, args: argparse.Namespace) -> None:
run_args = parse_run_args(raw_run_args=args.run_args)
user_ids = get_user_ids()
if args.use_gpus:
print("Using NVIDIA GPUs for `docker run`")
if args.interactive:
print("Using interactive mode for `docker run`")
run_args.append("-it")
docker_run(
image_uri=args.image_uri,
command_args=args.command_args,
use_gpus=args.use_gpus,
workdir=args.workdir,
user_ids=user_ids,
extra_args=run_args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage=(
f"{sys.argv[0]} --image-uri IMAGE_URI [--use-gpus] [--interactive] "
"[--workdir WORKDIR] [--run-args RUN_ARGS] -- COMMAND_ARG "
"[COMMAND_ARG ...]"
),
description="Run tasks inside a Docker container",
)
parser.add_argument(
"--image-uri",
type=str,
required=True,
help=(
"Fully qualified image URI to identify the container, e.g. "
"492475357299.dkr.ecr.us-west-2.amazonaws.com/xgb-ci.gpu:main"
),
)
parser.add_argument(
"--use-gpus",
action="store_true",
help=(
"Grant the container access to NVIDIA GPUs; requires the NVIDIA "
"Container Toolkit."
),
)
parser.add_argument(
"--interactive",
action="store_true",
help=(
"Run the container in the interactive mode; requires an interactive shell "
"(TTY). With this flag, you can use Ctrl-C to interrupt an long-running "
"command."
),
)
parser.add_argument(
"--workdir",
type=lambda p: pathlib.Path(p).expanduser().resolve(),
default=PROJECT_ROOT_DIR,
help="Path to working directory; if unset, use the project's root",
)
parser.add_argument(
"--run-args",
type=str,
default="",
help=(
"Argument(s) to be passed to `docker run`. When passing multiple "
"arguments, use single quotes to wrap them. Example: "
"--run-args '--cap-add SYS_PTRACE --shm-size=4g'"
),
)
parser.add_argument(
"command_args",
metavar="COMMAND_ARG",
type=str,
nargs="+",
help=(
"Argument(s) for the command to execute. NOTE. Make sure to specify "
"double-dash (--) to clearly distinguish between the command and the "
"preceding parameters. Example: --run-args '--cap-add SYS_PTRACE "
"--shm-size=4g' -- ./myprog"
),
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args()
main(args=parsed_args)
|
"""
Wrapper script to run a command inside a Docker container
"""
import argparse
import grp
import itertools
import os
import pathlib
import pwd
import subprocess
import sys
import textwrap
OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent
PROJECT_ROOT_DIR = OPS_DIR.parent
LINEWIDTH = 88
TEXT_WRAPPER = textwrap.TextWrapper(
width=LINEWIDTH,
initial_indent="",
subsequent_indent=" ",
break_long_words=False,
break_on_hyphens=False,
)
def parse_run_args(*, raw_run_args: str) -> list[str]:
return [x for x in raw_run_args.split() if x]
def get_user_ids() -> dict[str, str]:
uid = os.getuid()
gid = os.getgid()
return {
"CI_BUILD_UID": str(uid),
"CI_BUILD_USER": pwd.getpwuid(uid).pw_name,
"CI_BUILD_GID": str(gid),
"CI_BUILD_GROUP": grp.getgrgid(gid).gr_name,
}
def fancy_print_cli_args(*, cli_args: list[str]) -> None:
print(
"=" * LINEWIDTH
+ "\n"
+ " \\\n".join(TEXT_WRAPPER.wrap(" ".join(cli_args)))
+ "\n"
+ "=" * LINEWIDTH
+ "\n",
flush=True,
)
def docker_run(
*,
container_tag: str,
command_args: list[str],
use_gpus: bool,
workdir: pathlib.Path,
user_ids: dict[str, str],
extra_args: list[str],
) -> None:
# Command-line arguments to be passed to `docker run`
docker_run_cli_args = ["--rm", "--pid=host"]
if use_gpus:
docker_run_cli_args.extend(["--gpus", "all"])
docker_run_cli_args.extend(["-v", f"{workdir}:/workspace", "-w", "/workspace"])
docker_run_cli_args.extend(
itertools.chain.from_iterable([["-e", f"{k}={v}"] for k, v in user_ids.items()])
)
docker_run_cli_args.extend(extra_args)
docker_run_cli_args.append(container_tag)
docker_run_cli_args.extend(command_args)
cli_args = ["docker", "run"] + docker_run_cli_args
fancy_print_cli_args(cli_args=cli_args)
subprocess.run(cli_args, check=True, encoding="utf-8")
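# For illustration only (image tag and workdir assumed), a call such as
#   docker_run(container_tag="xgb-ci.gpu:main", command_args=["bash"], use_gpus=True,
#              workdir=PROJECT_ROOT_DIR, user_ids=get_user_ids(), extra_args=[])
# composes roughly:
#   docker run --rm --pid=host --gpus all -v <workdir>:/workspace -w /workspace \
#       -e CI_BUILD_UID=... -e CI_BUILD_USER=... -e CI_BUILD_GID=... -e CI_BUILD_GROUP=... \
#       xgb-ci.gpu:main bash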
def main(*, args: argparse.Namespace) -> None:
run_args = parse_run_args(raw_run_args=args.run_args)
user_ids = get_user_ids()
if args.use_gpus:
print("Using NVIDIA GPUs for `docker run`")
if args.interactive:
print("Using interactive mode for `docker run`")
run_args.append("-it")
docker_run(
container_tag=args.container_tag,
command_args=args.command_args,
use_gpus=args.use_gpus,
workdir=args.workdir,
user_ids=user_ids,
extra_args=run_args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage=(
f"{sys.argv[0]} --container-tag CONTAINER_TAG [--use-gpus] [--interactive] "
"[--workdir WORKDIR] [--run-args RUN_ARGS] -- COMMAND_ARG "
"[COMMAND_ARG ...]"
),
description="Run tasks inside a Docker container",
)
parser.add_argument(
"--container-tag",
type=str,
required=True,
help=(
"Container tag to identify the container, e.g. "
"492475357299.dkr.ecr.us-west-2.amazonaws.com/xgb-ci.gpu:main"
),
)
parser.add_argument(
"--use-gpus",
action="store_true",
help=(
"Grant the container access to NVIDIA GPUs; requires the NVIDIA "
"Container Toolkit."
),
)
parser.add_argument(
"--interactive",
action="store_true",
help=(
"Run the container in the interactive mode; requires an interactive shell "
"(TTY). With this flag, you can use Ctrl-C to interrupt an long-running "
"command."
),
)
parser.add_argument(
"--workdir",
type=lambda p: pathlib.Path(p).expanduser().resolve(),
default=PROJECT_ROOT_DIR,
help="Path to working directory; if unset, use the project's root",
)
parser.add_argument(
"--run-args",
type=str,
default="",
help=(
"Argument(s) to be passed to `docker run`. When passing multiple "
"arguments, use single quotes to wrap them. Example: "
"--run-args '--cap-add SYS_PTRACE --shm-size=4g'"
),
)
parser.add_argument(
"command_args",
metavar="COMMAND_ARG",
type=str,
nargs="+",
help=(
"Argument(s) for the command to execute. NOTE. Make sure to specify "
"double-dash (--) to clearly distinguish between the command and the "
"preceding parameters. Example: --run-args '--cap-add SYS_PTRACE "
"--shm-size=4g' -- ./myprog"
),
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args()
main(args=parsed_args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead,
MaskIoUHead)
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
# test dynamic_mask_head
dummy_proposal_feats = torch.rand(num_sampled, 8)
dynamic_mask_head = DynamicMaskHead(
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=8,
feat_channels=8,
out_channels=8,
input_feat_shape=6,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
num_convs=1,
num_classes=8,
in_channels=8,
roi_feat_size=6)
mask_pred = dynamic_mask_head(dummy_feats, dummy_proposal_feats)
mask_target = dynamic_mask_head.get_targets(sampling_results, gt_masks,
train_cfg)
loss_mask = dynamic_mask_head.loss(mask_pred, mask_target, pos_labels)
loss_mask = loss_mask['loss_mask'].sum()
assert loss_mask.item() >= 0
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead,
MaskIoUHead)
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
# test dynamic_mask_head
dummy_proposal_feats = torch.rand(num_sampled, 8)
dynamic_mask_head = DynamicMaskHead(
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=8,
feat_channels=8,
out_channels=8,
input_feat_shape=6,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
num_convs=1,
num_classes=8,
in_channels=8,
roi_feat_size=6)
mask_pred = dynamic_mask_head(dummy_feats, dummy_proposal_feats)
mask_target = dynamic_mask_head.get_targets(sampling_results, gt_masks,
train_cfg)
loss_mask = dynamic_mask_head.loss(mask_pred, mask_target, pos_labels)
loss_mask = loss_mask['loss_mask'].sum()
assert loss_mask.item() >= 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_persistent_workers_on(
self, mock_is_model_wrapper):
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.detector.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.detector.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
def test_not_model_wrapper_and_persistent_workers_off(self):
runner = Mock()
runner.model = Mock()
runner.model.detector.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = False
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.model.detector.bbox_head.use_l1)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_initialize_after_switching(self, mock_is_model_wrapper):
# This simulates the resumption after the switching.
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 285
runner.max_epochs = 300
        # epoch + 1 > max_epochs - num_last_epochs.
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_persistent_workers_on(
self, mock_is_model_wrapper):
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
def test_not_model_wrapper_and_persistent_workers_off(self):
runner = Mock()
runner.model = Mock()
runner.model.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = False
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.model.bbox_head.use_l1)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_initialize_after_switching(self, mock_is_model_wrapper):
# This simulates the resumption after the switching.
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 285
runner.max_epochs = 300
        # epoch + 1 > max_epochs - num_last_epochs.
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(fn, (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
|
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(fn, (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import TextDoc
def test_simple_init():
t = TextDoc(text='hello')
assert t.text == 'hello'
def test_str_init():
t = parse_obj_as(TextDoc, 'hello')
assert t.text == 'hello'
def test_doc():
class MyDoc(BaseDoc):
text1: TextDoc
text2: TextDoc
doc = MyDoc(text1='hello', text2=TextDoc(text='world'))
assert doc.text1.text == 'hello'
assert doc.text2.text == 'world'
|
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import TextDoc
def test_simple_init():
t = TextDoc(text='hello')
assert t.text == 'hello'
def test_str_init():
t = parse_obj_as(TextDoc, 'hello')
assert t.text == 'hello'
def test_doc():
class MyDoc(BaseDoc):
text1: TextDoc
text2: TextDoc
doc = MyDoc(text1='hello', text2=TextDoc(text='world'))
assert doc.text1.text == 'hello'
assert doc.text2.text == 'world'
|
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--exit-zero")
run("isort", "--diff", "--check", "--profile", "black", ".")
run("black", "--diff", "--check", ".")
run("pyright")
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def populate_database():
import glob
import json
import pathlib
import requests
import market.model
templates = pathlib.Path(__file__).parent.parent / "graph_templates"
all_files = glob.glob(str(templates / "*.json"))
for file in all_files:
with open(file, "r") as f:
data = f.read()
req = market.model.AddAgentRequest(
graph=json.loads(data),
author="Populate DB",
categories=["Pre-Populated"],
keywords=["test"],
)
response = requests.post(
"http://localhost:8015/api/v1/market/admin/agent", json=req.model_dump()
)
print(response.text)
def format():
run("ruff", "check", "--fix", ".")
run("isort", "--profile", "black", ".")
run("black", ".")
run("pyright", ".")
def app():
port = os.getenv("PORT", "8015")
run("uvicorn", "market.app:app", "--reload", "--port", port, "--host", "0.0.0.0")
def setup():
run("prisma", "generate")
run("prisma", "migrate", "deploy")
|
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--exit-zero")
run("isort", "--diff", "--check", "--profile", "black", ".")
run("black", "--diff", "--check", ".")
run("pyright")
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def populate_database():
import glob
import json
import pathlib
import requests
import market.model
templates = pathlib.Path(__file__).parent.parent / "backend" / "graph_templates"
all_files = glob.glob(str(templates / "*.json"))
for file in all_files:
with open(file, "r") as f:
data = f.read()
req = market.model.AddAgentRequest(
graph=json.loads(data),
author="Populate DB",
categories=["Pre-Populated"],
keywords=["test"],
)
response = requests.post(
"http://localhost:8015/api/v1/market/admin/agent", json=req.model_dump()
)
print(response.text)
def format():
run("ruff", "check", "--fix", ".")
run("isort", "--profile", "black", ".")
run("black", ".")
run("pyright", ".")
def app():
port = os.getenv("PORT", "8015")
run("uvicorn", "market.app:app", "--reload", "--port", port, "--host", "0.0.0.0")
def setup():
run("prisma", "generate")
run("prisma", "migrate", "deploy")
|
# We follow the original implementation which
# adopts the Caffe pre-trained backbone.
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='AutoAssign',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
relu_before_extra_convs=True,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')),
bbox_head=dict(
type='AutoAssignHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_bbox=dict(type='GIoULoss', loss_weight=5.0)),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01), paramwise_cfg=dict(norm_decay_mult=0.))
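# A minimal loading sketch (file name and MMEngine assumed), e.g. from a separate script:
#   from mmengine.config import Config
#   cfg = Config.fromfile('configs/autoassign/autoassign_r50_fpn_1x_coco.py')
#   print(cfg.model.bbox_head.num_classes)  # 80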
|
# We follow the original implementation which
# adopts the Caffe pre-trained backbone.
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='AutoAssign',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
relu_before_extra_convs=True,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')),
bbox_head=dict(
type='AutoAssignHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_bbox=dict(type='GIoULoss', loss_weight=5.0)),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01, paramwise_cfg=dict(norm_decay_mult=0.)))
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocList v1.
It can be useful to start migrating a codebase from v1 to v2.
    Nevertheless, the API is not fully compatible with DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
of the data is similar.
.. code-block:: python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
doc.chunks = DocList[Document]([Document() for _ in range(10)])
doc.chunks = DocList[Document]([Document() for _ in range(10)])
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocList[LegacyDocument]]
matches: Optional[DocList[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocArray
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
It can be useful to start migrating a codebase from v1 to v2.
    Nevertheless, the API is not fully compatible with DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
of the data is similar.
.. code-block:: python
from docarray import DocArray
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
doc.chunks = DocArray[Document]([Document() for _ in range(10)])
doc.chunks = DocArray[Document]([Document() for _ in range(10)])
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocArray[LegacyDocument]]
matches: Optional[DocArray[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
import logging
from fastapi import Request
from backend.data import integrations
from backend.data.model import APIKeyCredentials, Credentials
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks._base import BaseWebhooksManager
from backend.util.request import Requests
logger = logging.getLogger(__name__)
class Slant3DWebhooksManager(BaseWebhooksManager):
"""Manager for Slant3D webhooks"""
PROVIDER_NAME = ProviderName.SLANT3D
BASE_URL = "https://www.slant3dapi.com/api"
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
"""Register a new webhook with Slant3D"""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("API key is required to register a webhook")
headers = {
"api-key": credentials.api_key.get_secret_value(),
"Content-Type": "application/json",
}
        # Slant3D's API doesn't take an events list; we simply register for all order updates
payload = {"endPoint": ingress_url}
response = await Requests().post(
f"{self.BASE_URL}/customer/webhookSubscribe", headers=headers, json=payload
)
if not response.ok:
error = response.json().get("error", "Unknown error")
raise RuntimeError(f"Failed to register webhook: {error}")
webhook_config = {
"endpoint": ingress_url,
"provider": self.PROVIDER_NAME,
"events": ["order.shipped"], # Currently the only supported event
"type": webhook_type,
}
return "", webhook_config
@classmethod
async def validate_payload(
cls, webhook: integrations.Webhook, request: Request
) -> tuple[dict, str]:
"""Validate incoming webhook payload from Slant3D"""
payload = await request.json()
# Validate required fields from Slant3D API spec
required_fields = ["orderId", "status", "trackingNumber", "carrierCode"]
missing_fields = [field for field in required_fields if field not in payload]
if missing_fields:
raise ValueError(f"Missing required fields: {', '.join(missing_fields)}")
# Normalize payload structure
normalized_payload = {
"orderId": payload["orderId"],
"status": payload["status"],
"trackingNumber": payload["trackingNumber"],
"carrierCode": payload["carrierCode"],
}
# Currently Slant3D only sends shipping notifications
# Convert status to lowercase for event format compatibility
event_type = f"order.{payload['status'].lower()}"
return normalized_payload, event_type
async def _deregister_webhook(
self, webhook: integrations.Webhook, credentials: Credentials
) -> None:
"""
Note: Slant3D API currently doesn't provide a deregistration endpoint.
This would need to be handled through support.
"""
# Log warning since we can't properly deregister
logger.warning(
f"Warning: Manual deregistration required for webhook {webhook.id}"
)
pass
|
import logging
from fastapi import Request
from backend.data import integrations
from backend.data.model import APIKeyCredentials, Credentials
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks._base import BaseWebhooksManager
from backend.util.request import Requests
logger = logging.getLogger(__name__)
class Slant3DWebhooksManager(BaseWebhooksManager):
"""Manager for Slant3D webhooks"""
PROVIDER_NAME = ProviderName.SLANT3D
BASE_URL = "https://www.slant3dapi.com/api"
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
"""Register a new webhook with Slant3D"""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("API key is required to register a webhook")
headers = {
"api-key": credentials.api_key.get_secret_value(),
"Content-Type": "application/json",
}
        # Slant3D's API doesn't take an events list; we simply register for all order updates
payload = {"endPoint": ingress_url}
response = Requests().post(
f"{self.BASE_URL}/customer/webhookSubscribe", headers=headers, json=payload
)
if not response.ok:
error = response.json().get("error", "Unknown error")
raise RuntimeError(f"Failed to register webhook: {error}")
webhook_config = {
"endpoint": ingress_url,
"provider": self.PROVIDER_NAME,
"events": ["order.shipped"], # Currently the only supported event
"type": webhook_type,
}
return "", webhook_config
@classmethod
async def validate_payload(
cls, webhook: integrations.Webhook, request: Request
) -> tuple[dict, str]:
"""Validate incoming webhook payload from Slant3D"""
payload = await request.json()
# Validate required fields from Slant3D API spec
required_fields = ["orderId", "status", "trackingNumber", "carrierCode"]
missing_fields = [field for field in required_fields if field not in payload]
if missing_fields:
raise ValueError(f"Missing required fields: {', '.join(missing_fields)}")
# Normalize payload structure
normalized_payload = {
"orderId": payload["orderId"],
"status": payload["status"],
"trackingNumber": payload["trackingNumber"],
"carrierCode": payload["carrierCode"],
}
# Currently Slant3D only sends shipping notifications
# Convert status to lowercase for event format compatibility
event_type = f"order.{payload['status'].lower()}"
return normalized_payload, event_type
async def _deregister_webhook(
self, webhook: integrations.Webhook, credentials: Credentials
) -> None:
"""
Note: Slant3D API currently doesn't provide a deregistration endpoint.
This would need to be handled through support.
"""
# Log warning since we can't properly deregister
logger.warning(
f"Warning: Manual deregistration required for webhook {webhook.id}"
)
pass
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.ports[0]
self.grpc_port = self.ports[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
jina_pb2_grpc.add_JinaSingleDataRequestRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
jina_pb2.DESCRIPTOR.services_by_name['JinaSingleDataRequestRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
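# A minimal usage sketch (ports assumed); such a custom gateway is typically plugged
# into a Flow that exposes both protocols:
#   from jina import Flow
#   flow = Flow().config_gateway(
#       uses=MultiProtocolGateway, protocol=['http', 'grpc'], port=[12345, 12346]
#   )
#   with flow:
#       flow.block()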
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.ports[0]
self.grpc_port = self.ports[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
|
from typing import Union, List
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
from typing import Union, List
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
:param guid
id for the example
:param texts
the texts for the example.
:param label
the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
from mmengine.testing import RunnerTestCase
class TestEmptyCacheHook(RunnerTestCase):
def test_with_runner(self):
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg = self.epoch_based_cfg
cfg.custom_hooks = [dict(type='EmptyCacheHook')]
cfg.train_cfg.val_interval = 1e6 # disable validation during training # noqa: E501
runner = self.build_runner(cfg)
runner.train()
runner.test()
runner.val()
# Call `torch.cuda.empty_cache` after each epoch:
# runner.train: `max_epochs` times.
# runner.val: `1` time.
# runner.test: `1` time.
target_called_times = runner.max_epochs + 2
self.assertEqual(mock_empty_cache.call_count, target_called_times)
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg.custom_hooks = [dict(type='EmptyCacheHook', before_epoch=True)]
runner = self.build_runner(cfg)
runner.train()
runner.val()
runner.test()
# Call `torch.cuda.empty_cache` after/before each epoch:
# runner.train: `max_epochs*2` times.
# runner.val: `1*2` times.
# runner.test: `1*2` times.
target_called_times = runner.max_epochs * 2 + 4
self.assertEqual(mock_empty_cache.call_count, target_called_times)
with patch('torch.cuda.empty_cache') as mock_empty_cache:
cfg.custom_hooks = [
dict(
type='EmptyCacheHook', after_iter=True, before_epoch=True)
]
runner = self.build_runner(cfg)
runner.train()
runner.val()
runner.test()
# Call `torch.cuda.empty_cache` after/before each epoch,
# after each iteration:
# runner.train: `max_epochs*2 + len(dataloader)*max_epochs` times. # noqa: E501
# runner.val: `1*2 + len(val_dataloader)` times.
# runner.test: `1*2 + len(val_dataloader)` times.
target_called_times = \
runner.max_epochs * 2 + 4 + \
len(runner.train_dataloader) * runner.max_epochs + \
len(runner.val_dataloader) + \
len(runner.test_dataloader)
self.assertEqual(mock_empty_cache.call_count, target_called_times)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import EmptyCacheHook
class TestEmptyCacheHook:
def test_emtpy_cache_hook(self):
hook = EmptyCacheHook(True, True, True)
runner = Mock()
hook._after_iter(runner, 0)
hook._before_epoch(runner)
hook._after_epoch(runner)
|
# coding: utf-8
from pathlib import Path
import pandas as pd
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
print("Loading data...")
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / "regression"
df_train = pd.read_csv(str(regression_example_dir / "regression.train"), header=None, sep="\t")
df_test = pd.read_csv(str(regression_example_dir / "regression.test"), header=None, sep="\t")
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
"boosting_type": "gbdt",
"objective": "regression",
"metric": {"l2", "l1"},
"num_leaves": 31,
"learning_rate": 0.05,
"feature_fraction": 0.9,
"bagging_fraction": 0.8,
"bagging_freq": 5,
"verbose": 0,
}
print("Starting training...")
# train
gbm = lgb.train(
params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, callbacks=[lgb.early_stopping(stopping_rounds=5)]
)
print("Saving model...")
# save model to file
gbm.save_model("model.txt")
print("Starting predicting...")
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f"The RMSE of prediction is: {rmse_test}")
|
# coding: utf-8
from pathlib import Path
import pandas as pd
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'l1'},
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
print('Starting training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
callbacks=[lgb.early_stopping(stopping_rounds=5)])
print('Saving model...')
# save model to file
gbm.save_model('model.txt')
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
|
"""Init params."""
from llama_index.readers.huggingface_fs.base import HuggingFaceFSReader
__all__ = ["HuggingFaceFSReader"]
|
"""Init params."""
from llama_index.readers.huggingface_fs.base import HuggingFaceFSReader
__all__ = ["HuggingFaceFSReader"]
|
import os
import time
import pytest
from jina import Flow, Executor
class SlowExecutor(Executor):
def close(self) -> None:
with open(
os.path.join(self.metas.workspace, 'test'), 'w', encoding='utf-8'
) as f:
time.sleep(10)
f.write('x')
@pytest.mark.slow
def test_slow_executor_close(tmpdir):
with Flow().add(
uses={'jtype': 'SlowExecutor', 'with': {}, 'metas': {'workspace': str(tmpdir)}}
) as f:
pass
assert os.path.exists(os.path.join(tmpdir, 'test'))
|
import os
import time
import pytest
from jina import Flow, Executor
class SlowExecutor(Executor):
def close(self) -> None:
with open(os.path.join(self.metas.workspace, 'test'), 'w', encoding='utf-8') as f:
time.sleep(10)
f.write('x')
@pytest.mark.slow
def test_slow_executor_close(tmpdir):
with Flow().add(
uses={'jtype': 'SlowExecutor', 'with': {}, 'metas': {'workspace': str(tmpdir)}}
) as f:
pass
assert os.path.exists(os.path.join(tmpdir, 'test'))
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import RerankingEvaluator
from sentence_transformers.util import cos_sim
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseRerankingEvaluator(RerankingEvaluator):
def __init__(
self,
samples: list[dict[str, str | list[str]]],
at_k: int = 10,
name: str = "",
write_csv: bool = True,
similarity_fct: Callable[[Tensor, Tensor], Tensor] = cos_sim,
batch_size: int = 64,
show_progress_bar: bool = False,
use_batched_encoding: bool = True,
truncate_dim: int | None = None,
mrr_at_k: int | None = None,
):
return super().__init__(
samples=samples,
at_k=at_k,
name=name,
write_csv=write_csv,
similarity_fct=similarity_fct,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
use_batched_encoding=use_batched_encoding,
truncate_dim=truncate_dim,
mrr_at_k=mrr_at_k,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def compute_metrices(self, model: SparseEncoder):
return super().compute_metrices(model)
def compute_metrices_batched(self, model: SparseEncoder):
return super().compute_metrices_batched(model)
def compute_metrices_individual(self, model: SparseEncoder):
return super().compute_metrices_individual(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
show_progress_bar: bool | None = None,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=show_progress_bar,
convert_to_sparse_tensor=True,
            convert_to_tensor=False,  # slicing is not supported on sparse tensors, so we keep a list of sparse tensors
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
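# A minimal usage sketch (sample contents assumed): each sample is a dict with a
# query plus lists of positive and negative passages, matching RerankingEvaluator:
#   samples = [{"query": "what is a sparse encoder?",
#               "positive": ["A sparse encoder maps text to a high-dimensional sparse vector."],
#               "negative": ["The weather is nice today."]}]
#   evaluator = SparseRerankingEvaluator(samples, name="toy")
#   results = evaluator(model)  # model: a SparseEncoder instance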
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import RerankingEvaluator
from sentence_transformers.util import cos_sim
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseRerankingEvaluator(RerankingEvaluator):
def __init__(
self,
samples: list[dict[str, str | list[str]]],
at_k: int = 10,
name: str = "",
write_csv: bool = True,
similarity_fct: Callable[[Tensor, Tensor], Tensor] = cos_sim,
batch_size: int = 64,
show_progress_bar: bool = False,
use_batched_encoding: bool = True,
truncate_dim: int | None = None,
mrr_at_k: int | None = None,
):
return super().__init__(
samples=samples,
at_k=at_k,
name=name,
write_csv=write_csv,
similarity_fct=similarity_fct,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
use_batched_encoding=use_batched_encoding,
truncate_dim=truncate_dim,
mrr_at_k=mrr_at_k,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def compute_metrices(self, model: SparseEncoder):
return super().compute_metrices(model)
def compute_metrices_batched(self, model: SparseEncoder):
return super().compute_metrices_batched(model)
def compute_metrices_individual(self, model: SparseEncoder):
return super().compute_metrices_individual(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
show_progress_bar: bool | None = None,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=show_progress_bar,
convert_to_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends, Query
from fastapi.responses import JSONResponse
from backend.data.user import (
get_user_by_email,
set_user_email_verification,
unsubscribe_user_by_token,
)
from backend.server.v2.postmark.models import (
PostmarkBounceEnum,
PostmarkBounceWebhook,
PostmarkClickWebhook,
PostmarkDeliveryWebhook,
PostmarkOpenWebhook,
PostmarkSpamComplaintWebhook,
PostmarkSubscriptionChangeWebhook,
PostmarkWebhook,
)
from backend.util.settings import Settings
settings = Settings()
postmark_validator = APIKeyValidator(
"X-Postmark-Webhook-Token",
settings.secrets.postmark_webhook_token,
)
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post("/unsubscribe")
async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
logger.info(f"Received unsubscribe request from One Click Unsubscribe: {token}")
try:
await unsubscribe_user_by_token(token)
except Exception as e:
logger.error(f"Failed to unsubscribe user by token {token}: {e}")
raise e
return JSONResponse(status_code=200, content={"status": "ok"})
@router.post("/", dependencies=[Depends(postmark_validator.get_dependency())])
async def postmark_webhook_handler(
webhook: Annotated[
PostmarkWebhook,
Body(discriminator="RecordType"),
]
):
logger.info(f"Received webhook from Postmark: {webhook}")
match webhook:
case PostmarkDeliveryWebhook():
delivery_handler(webhook)
case PostmarkBounceWebhook():
await bounce_handler(webhook)
case PostmarkSpamComplaintWebhook():
spam_handler(webhook)
case PostmarkOpenWebhook():
open_handler(webhook)
case PostmarkClickWebhook():
click_handler(webhook)
case PostmarkSubscriptionChangeWebhook():
subscription_handler(webhook)
case _:
logger.warning(f"Unknown webhook type: {type(webhook)}")
return
async def bounce_handler(event: PostmarkBounceWebhook):
logger.info(f"Bounce handler {event=}")
if event.TypeCode in [
PostmarkBounceEnum.Transient,
PostmarkBounceEnum.SoftBounce,
PostmarkBounceEnum.DnsError,
]:
logger.info(
f"Softish bounce: {event.TypeCode} for {event.Email}, not setting email verification to false"
)
return
logger.info(f"{event.Email=}")
user = await get_user_by_email(event.Email)
if not user:
logger.error(f"User not found for email: {event.Email}")
return
await set_user_email_verification(user.id, False)
logger.debug(f"Setting email verification to false for user: {user.id}")
def spam_handler(event: PostmarkSpamComplaintWebhook):
logger.info("Spam handler")
pass
def delivery_handler(event: PostmarkDeliveryWebhook):
logger.info("Delivery handler")
pass
def open_handler(event: PostmarkOpenWebhook):
logger.info("Open handler")
pass
def click_handler(event: PostmarkClickWebhook):
logger.info("Click handler")
pass
def subscription_handler(event: PostmarkSubscriptionChangeWebhook):
logger.info("Subscription handler")
pass
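# A minimal wiring sketch (prefix assumed): the router is mounted on the main FastAPI app, e.g.
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix="/postmark", tags=["postmark"])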
|
import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends
from backend.data.user import get_user_by_email, set_user_email_verification
from backend.server.v2.postmark.models import (
PostmarkBounceEnum,
PostmarkBounceWebhook,
PostmarkClickWebhook,
PostmarkDeliveryWebhook,
PostmarkOpenWebhook,
PostmarkSpamComplaintWebhook,
PostmarkSubscriptionChangeWebhook,
PostmarkWebhook,
)
from backend.util.settings import Settings
settings = Settings()
postmark_validator = APIKeyValidator(
"X-Postmark-Webhook-Token",
settings.secrets.postmark_webhook_token,
)
router = APIRouter(dependencies=[Depends(postmark_validator.get_dependency())])
logger = logging.getLogger(__name__)
@router.post("/")
async def postmark_webhook_handler(
webhook: Annotated[
PostmarkWebhook,
Body(discriminator="RecordType"),
]
):
logger.info(f"Received webhook from Postmark: {webhook}")
match webhook:
case PostmarkDeliveryWebhook():
delivery_handler(webhook)
case PostmarkBounceWebhook():
await bounce_handler(webhook)
case PostmarkSpamComplaintWebhook():
spam_handler(webhook)
case PostmarkOpenWebhook():
open_handler(webhook)
case PostmarkClickWebhook():
click_handler(webhook)
case PostmarkSubscriptionChangeWebhook():
subscription_handler(webhook)
case _:
logger.warning(f"Unknown webhook type: {type(webhook)}")
return
async def bounce_handler(event: PostmarkBounceWebhook):
logger.info(f"Bounce handler {event=}")
if event.TypeCode in [
PostmarkBounceEnum.Transient,
PostmarkBounceEnum.SoftBounce,
PostmarkBounceEnum.DnsError,
]:
logger.info(
f"Softish bounce: {event.TypeCode} for {event.Email}, not setting email verification to false"
)
return
logger.info(f"{event.Email=}")
user = await get_user_by_email(event.Email)
if not user:
logger.error(f"User not found for email: {event.Email}")
return
await set_user_email_verification(user.id, False)
logger.debug(f"Setting email verification to false for user: {user.id}")
def spam_handler(event: PostmarkSpamComplaintWebhook):
logger.info("Spam handler")
pass
def delivery_handler(event: PostmarkDeliveryWebhook):
logger.info("Delivery handler")
pass
def open_handler(event: PostmarkOpenWebhook):
logger.info("Open handler")
pass
def click_handler(event: PostmarkClickWebhook):
logger.info("Click handler")
pass
def subscription_handler(event: PostmarkSubscriptionChangeWebhook):
logger.info("Subscription handler")
pass
|
import collections
import json
import os
import string
from typing import Iterable, List
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast white-space tokenizer. Splits a sentence based on white spaces.
    Punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> List[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
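# A minimal usage sketch (vocabulary assumed): tokens in the stop-word list are dropped
# and the remaining tokens are mapped to their vocabulary indices.
#   tokenizer = WhitespaceTokenizer(vocab=["hello", "world"], stop_words=["the"])
#   tokenizer.tokenize("the hello world!")  # -> [0, 1]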
|
from typing import List, Iterable
import collections
import string
import os
import json
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast white-space tokenizer. Splits a sentence based on white spaces.
    Punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> List[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depending on the given loader,
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
loader: Callable[[str], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.loader = loader
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = self.loader(filename)
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
                # Note: Images might be removed by users at any time.
pass
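if __name__ == "__main__":
    # Editor's usage sketch (not part of torchvision); the root path is an
    # illustrative assumption and the extracted tarball is expected to already
    # be there, so download=False keeps this offline.
    dataset = SBU(root="data/sbu", download=False)
    print(f"{len(dataset)} captioned photos")
    img, caption = dataset[0]
    print(caption)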
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = Image.open(filename).convert("RGB")
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
                # Note: Images might be removed by users at any time.
pass
|
_base_ = ['faster-rcnn_r50_fpn_32xb2-1x_openimages.py']
model = dict(
roi_head=dict(bbox_head=dict(num_classes=500)),
test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-train-metas.pkl'))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
data_prefix=dict(img='OpenImages/'),
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
test_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
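# Editor's note: a sketch of the usual linear scaling rule (an assumption about
# how MMDetection applies `auto_scale_lr` when it is enabled, not part of this
# config). The optimizer LR is multiplied by the ratio of the actual total batch
# size to `base_batch_size`, e.g.
#   scaled_lr = base_lr * (num_gpus * samples_per_gpu) / 64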
|
_base_ = ['faster-rcnn_r50_fpn_32xb2-1x_openimages.py']
model = dict(
roi_head=dict(bbox_head=dict(num_classes=500)),
test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-train-metas.pkl'))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
test_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
from sentence_transformers.training_args import BatchSamplers
# 1. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli, 10k samples
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train[:10000]")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# 2. Create an evaluator to perform useful HPO
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 3. Define the Hyperparameter Search Space
def hpo_search_space(trial):
return {
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 2),
"per_device_train_batch_size": trial.suggest_int("per_device_train_batch_size", 32, 128),
"warmup_ratio": trial.suggest_float("warmup_ratio", 0, 0.3),
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
}
# 4. Define the Model Initialization
def hpo_model_init(trial):
return SentenceTransformer("distilbert-base-uncased")
# 5. Define the Loss Initialization
def hpo_loss_init(model):
return losses.MultipleNegativesRankingLoss(model)
# 6. Define the Objective Function
def hpo_compute_objective(metrics):
"""
Valid keys are: 'eval_loss', 'eval_sts-dev_pearson_cosine', 'eval_sts-dev_spearman_cosine',
'eval_sts-dev_pearson_manhattan', 'eval_sts-dev_spearman_manhattan', 'eval_sts-dev_pearson_euclidean',
'eval_sts-dev_spearman_euclidean', 'eval_sts-dev_pearson_dot', 'eval_sts-dev_spearman_dot',
'eval_sts-dev_pearson_max', 'eval_sts-dev_spearman_max', 'eval_runtime', 'eval_samples_per_second',
'eval_steps_per_second', 'epoch'
due to the evaluator that we're using.
"""
return metrics["eval_sts-dev_spearman_cosine"]
# 7. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir="checkpoints",
# Optional training parameters:
# max_steps=10000, # We might want to limit the number of steps for HPO
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
# Optional tracking/debugging parameters:
eval_strategy="no", # We don't need to evaluate/save during HPO
save_strategy="no",
logging_steps=10,
run_name="hpo", # Will be used in W&B if `wandb` is installed
)
# 8. Create the trainer with model_init rather than model
trainer = SentenceTransformerTrainer(
model=None,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
evaluator=dev_evaluator,
model_init=hpo_model_init,
loss=hpo_loss_init,
)
# 9. Perform the HPO
best_trial = trainer.hyperparameter_search(
hp_space=hpo_search_space,
compute_objective=hpo_compute_objective,
n_trials=20,
direction="maximize",
backend="optuna",
)
print(best_trial)
# Alternatively, to just train normally:
# trainer.train()
# print(dev_evaluator(trainer.model))
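# A possible follow-up step (editor's sketch, not part of the original script):
# re-train once with the best hyperparameters found above. This assumes that
# `best_trial.hyperparameters` holds the sampled values, as in the transformers
# `BestRun` interface.
# for key, value in best_trial.hyperparameters.items():
#     setattr(args, key, value)
# final_model = hpo_model_init(None)
# final_trainer = SentenceTransformerTrainer(
#     model=final_model,
#     args=args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     evaluator=dev_evaluator,
#     loss=hpo_loss_init(final_model),
# )
# final_trainer.train()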
|
from sentence_transformers import losses
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
from sentence_transformers.training_args import BatchSamplers
from datasets import load_dataset
# 1. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli, 10k samples
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train[:10000]")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# 2. Create an evaluator to perform useful HPO
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 3. Define the Hyperparameter Search Space
def hpo_search_space(trial):
return {
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 2),
"per_device_train_batch_size": trial.suggest_int("per_device_train_batch_size", 32, 128),
"warmup_ratio": trial.suggest_float("warmup_ratio", 0, 0.3),
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
}
# 4. Define the Model Initialization
def hpo_model_init(trial):
return SentenceTransformer("distilbert-base-uncased")
# 5. Define the Loss Initialization
def hpo_loss_init(model):
return losses.MultipleNegativesRankingLoss(model)
# 6. Define the Objective Function
def hpo_compute_objective(metrics):
"""
Valid keys are: 'eval_loss', 'eval_sts-dev_pearson_cosine', 'eval_sts-dev_spearman_cosine',
'eval_sts-dev_pearson_manhattan', 'eval_sts-dev_spearman_manhattan', 'eval_sts-dev_pearson_euclidean',
'eval_sts-dev_spearman_euclidean', 'eval_sts-dev_pearson_dot', 'eval_sts-dev_spearman_dot',
'eval_sts-dev_pearson_max', 'eval_sts-dev_spearman_max', 'eval_runtime', 'eval_samples_per_second',
'eval_steps_per_second', 'epoch'
due to the evaluator that we're using.
"""
return metrics["eval_sts-dev_spearman_cosine"]
# 7. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir="checkpoints",
# Optional training parameters:
# max_steps=10000, # We might want to limit the number of steps for HPO
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
# Optional tracking/debugging parameters:
eval_strategy="no", # We don't need to evaluate/save during HPO
save_strategy="no",
logging_steps=10,
run_name="hpo", # Will be used in W&B if `wandb` is installed
)
# 8. Create the trainer with model_init rather than model
trainer = SentenceTransformerTrainer(
model=None,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
evaluator=dev_evaluator,
model_init=hpo_model_init,
loss=hpo_loss_init,
)
# 9. Perform the HPO
best_trial = trainer.hyperparameter_search(
hp_space=hpo_search_space,
compute_objective=hpo_compute_objective,
n_trials=20,
direction="maximize",
backend="optuna",
)
print(best_trial)
# Alternatively, to just train normally:
# trainer.train()
# print(dev_evaluator(trainer.model))
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .fileio import *
from .registry import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .fileio import *
from .utils import *
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 20])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
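# Editor's note: a minimal sketch of how a config like this is typically loaded
# and inspected (assumes the mmcv 1.x API; the file path is illustrative):
#   from mmcv import Config
#   cfg = Config.fromfile('configs/pascal_voc/ssd300_voc0712.py')
#   print(cfg.optimizer.lr, cfg.runner.max_epochs)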
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 20])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_cogview4 import CogView4Transformer2DModel
from .transformer_cosmos import CosmosTransformer3DModel
from .transformer_easyanimate import EasyAnimateTransformer3DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hidream_image import HiDreamImageTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_hunyuan_video_framepack import HunyuanVideoFramepackTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_lumina2 import Lumina2Transformer2DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
from .transformer_wan import WanTransformer3DModel
from .transformer_wan_vace import WanVACETransformer3DModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_cogview4 import CogView4Transformer2DModel
from .transformer_cosmos import CosmosTransformer3DModel
from .transformer_easyanimate import EasyAnimateTransformer3DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hidream_image import HiDreamImageTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_hunyuan_video_framepack import HunyuanVideoFramepackTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_lumina2 import Lumina2Transformer2DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
from .transformer_wan import WanTransformer3DModel
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Ask Tom for an update on which loss to implement
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
ReconstructionLoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"ReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Add to the models the possibility of having the MLM head (for SPLADE)
# TODO : Check every loss for compatibility with the SparseEncoder; losses that do not use the utils similarity function may need minor modifications
# TODO : Same for the evaluator
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
SAMPLE_RATE = 16000
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* :cite:`fluent` Dataset
Args:
        root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
if subset not in ["train", "valid", "test"]:
raise ValueError("`subset` must be one of ['train', 'valid', 'test']")
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def get_metadata(self, n: int) -> Tuple[str, int, str, int, str, str, str, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): The index of the sample to be loaded
Returns:
(str, int, str, int, str, str, str, str):
``(filepath, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
file_path = os.path.join("wavs", "speakers", speaker_id, f"{file_name}.wav")
return file_path, SAMPLE_RATE, file_name, speaker_id, transcription, action, obj, location
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, str, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, str, str, str, str):
``(waveform, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
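if __name__ == "__main__":
    # Editor's usage sketch (not part of torchaudio); the root path is an
    # illustrative assumption and the extracted dataset must already be there.
    dataset = FluentSpeechCommands("data", subset="valid")
    waveform, sample_rate, file_name, speaker_id, transcription, action, obj, location = dataset[0]
    print(len(dataset), sample_rate, transcription, action, obj, location)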
|
import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* :cite:`fluent` Dataset
Args:
        root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
if subset not in ["train", "valid", "test"]:
raise ValueError("`subset` must be one of ['train', 'valid', 'test']")
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def __len__(self):
return len(self.data)
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, str, str, str, str):
``(waveform, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
wav_path = os.path.join(self._path, "wavs", "speakers", speaker_id, f"{file_name}.wav")
wav, sample_rate = torchaudio.load(wav_path)
return wav, sample_rate, file_name, speaker_id, transcription, action, obj, location
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import ATSSHead
def test_atss_head_loss():
"""Tests atss head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = ATSSHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, centernesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, centernesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_centerness_loss.item() == 0, (
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, centernesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_centerness_loss.item() > 0, (
'centerness loss should be non-zero')
|
import mmcv
import torch
from mmdet.models.dense_heads import ATSSHead
def test_atss_head_loss():
"""Tests atss head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = ATSSHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, centernesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, centernesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_centerness_loss.item() == 0, (
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, centernesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_centerness_loss.item() > 0, (
'centerness loss should be non-zero')
|
from abc import ABC
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
self._load_offset2ids()
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(self, columns):
if columns is None:
return []
return columns
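# Editor's sketch of how a concrete backend might fill in TYPE_MAP (an
# illustrative assumption, not part of docarray):
#   class InMemoryBackendMixin(BaseBackendMixin):
#       TYPE_MAP = {
#           'str': TypeMap(type='TEXT', converter=str),
#           'int': TypeMap(type='INTEGER', converter=int),
#       }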
|
from abc import ABC
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
self._load_offset2ids()
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from ....math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(self, columns):
if columns is None:
return []
return columns
|
from typing import Any
from io import StringIO
def md_to_df(md_str: str) -> Any:
"""Convert Markdown to dataframe."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"You must install the `pandas` package to use this node parser."
)
# Replace " by "" in md_str
md_str = md_str.replace('"', '""')
# Replace markdown pipe tables with commas
md_str = md_str.replace("|", '","')
# Remove the second line (table header separator)
lines = md_str.split("\n")
md_str = "\n".join(lines[:1] + lines[2:])
    # Remove the first two and last two characters of each line (left over from the leading and trailing pipes)
lines = md_str.split("\n")
md_str = "\n".join([line[2:-2] for line in lines])
# Check if the table is empty
if len(md_str) == 0:
return None
# Use pandas to read the CSV string into a DataFrame
try:
return pd.read_csv(StringIO(md_str))
except pd.errors.ParserError:
return None
def html_to_df(html_str: str) -> Any:
"""Convert HTML to dataframe."""
try:
from lxml import html
except ImportError:
raise ImportError(
"You must install the `lxml` package to use this node parser."
)
try:
import pandas as pd
except ImportError:
raise ImportError(
"You must install the `pandas` package to use this node parser."
)
tree = html.fromstring(html_str)
table_element = tree.xpath("//table")[0]
rows = table_element.xpath(".//tr")
data = []
for row in rows:
cols = row.xpath(".//td")
cols = [c.text.strip() if c.text is not None else "" for c in cols]
data.append(cols)
# Check if the table is empty
if len(data) == 0:
return None
    # Check if all rows have the same number of columns
if not all(len(row) == len(data[0]) for row in data):
return None
return pd.DataFrame(data[1:], columns=data[0])
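if __name__ == "__main__":
    # Editor's usage sketch with tiny illustrative tables (not part of the
    # original module).
    md_table = "| a | b |\n| --- | --- |\n| 1 | 2 |"
    print(md_to_df(md_table))
    html_table = "<table><tr><td>a</td><td>b</td></tr><tr><td>1</td><td>2</td></tr></table>"
    print(html_to_df(html_table))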
|
from typing import Any
from io import StringIO
def md_to_df(md_str: str) -> Any:
"""Convert Markdown to dataframe."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"You must install the `pandas` package to use this node parser."
)
# Replace " by "" in md_str
md_str = md_str.replace('"', '""')
# Replace markdown pipe tables with commas
md_str = md_str.replace("|", '","')
# Remove the second line (table header separator)
lines = md_str.split("\n")
md_str = "\n".join(lines[:1] + lines[2:])
    # Remove the first two and last two characters of each line (left over from the leading and trailing pipes)
lines = md_str.split("\n")
md_str = "\n".join([line[2:-2] for line in lines])
# Check if the table is empty
if len(md_str) == 0:
return None
# Use pandas to read the CSV string into a DataFrame
return pd.read_csv(StringIO(md_str))
def html_to_df(html_str: str) -> Any:
"""Convert HTML to dataframe."""
try:
from lxml import html
except ImportError:
raise ImportError(
"You must install the `lxml` package to use this node parser."
)
try:
import pandas as pd
except ImportError:
raise ImportError(
"You must install the `pandas` package to use this node parser."
)
tree = html.fromstring(html_str)
table_element = tree.xpath("//table")[0]
rows = table_element.xpath(".//tr")
data = []
for row in rows:
cols = row.xpath(".//td")
cols = [c.text.strip() if c.text is not None else "" for c in cols]
data.append(cols)
# Check if the table is empty
if len(data) == 0:
return None
    # Check if all rows have the same number of columns
if not all(len(row) == len(data[0]) for row in data):
return None
return pd.DataFrame(data[1:], columns=data[0])
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead'
]
|
import functools
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..providers import ProviderName
from ._base import BaseWebhooksManager
# --8<-- [start:load_webhook_managers]
@functools.cache
def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]:
webhook_managers = {}
from .compass import CompassWebhookManager
from .generic import GenericWebhooksManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
webhook_managers.update(
{
handler.PROVIDER_NAME: handler
for handler in [
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
GenericWebhooksManager,
]
}
)
return webhook_managers
# --8<-- [end:load_webhook_managers]
def get_webhook_manager(provider_name: "ProviderName") -> "BaseWebhooksManager":
return load_webhook_managers()[provider_name]()
def supports_webhooks(provider_name: "ProviderName") -> bool:
return provider_name in load_webhook_managers()
__all__ = ["get_webhook_manager", "supports_webhooks"]
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..providers import ProviderName
from ._base import BaseWebhooksManager
_WEBHOOK_MANAGERS: dict["ProviderName", type["BaseWebhooksManager"]] = {}
# --8<-- [start:load_webhook_managers]
def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]:
if _WEBHOOK_MANAGERS:
return _WEBHOOK_MANAGERS
from .compass import CompassWebhookManager
from .generic import GenericWebhooksManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
_WEBHOOK_MANAGERS.update(
{
handler.PROVIDER_NAME: handler
for handler in [
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
GenericWebhooksManager,
]
}
)
return _WEBHOOK_MANAGERS
# --8<-- [end:load_webhook_managers]
def get_webhook_manager(provider_name: "ProviderName") -> "BaseWebhooksManager":
return load_webhook_managers()[provider_name]()
def supports_webhooks(provider_name: "ProviderName") -> bool:
return provider_name in load_webhook_managers()
__all__ = ["get_webhook_manager", "supports_webhooks"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
    During distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_epoch(self, runner) -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.data_loader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
runner.data_loader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'):
            # batch sampler in PyTorch wraps the sampler as its attribute.
runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
    During distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_epoch(self, runner: object) -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.data_loader.sampler, 'set_epoch'): # type: ignore
# in case the data loader uses `SequentialSampler` in Pytorch
runner.data_loader.sampler.set_epoch(runner.epoch) # type: ignore
elif hasattr(
runner.data_loader.batch_sampler.sampler, # type: ignore
'set_epoch'):
            # batch sampler in PyTorch wraps the sampler as its attribute.
runner.data_loader.batch_sampler.sampler.set_epoch( # type: ignore
runner.epoch) # type: ignore
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmengine
def parse_args():
parser = argparse.ArgumentParser(description='Override Category')
parser.add_argument('data_root')
return parser.parse_args()
def main():
args = parse_args()
ChessPieces = [{
'id': 1,
'name': ' ',
'supercategory': 'pieces'
}, {
'id': 2,
'name': 'black bishop',
'supercategory': 'pieces'
}, {
'id': 3,
'name': 'black king',
'supercategory': 'pieces'
}, {
'id': 4,
'name': 'black knight',
'supercategory': 'pieces'
}, {
'id': 5,
'name': 'black pawn',
'supercategory': 'pieces'
}, {
'id': 6,
'name': 'black queen',
'supercategory': 'pieces'
}, {
'id': 7,
'name': 'black rook',
'supercategory': 'pieces'
}, {
'id': 8,
'name': 'white bishop',
'supercategory': 'pieces'
}, {
'id': 9,
'name': 'white king',
'supercategory': 'pieces'
}, {
'id': 10,
'name': 'white knight',
'supercategory': 'pieces'
}, {
'id': 11,
'name': 'white pawn',
'supercategory': 'pieces'
}, {
'id': 12,
'name': 'white queen',
'supercategory': 'pieces'
}, {
'id': 13,
'name': 'white rook',
'supercategory': 'pieces'
}]
_data_root = args.data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/'
json_data = mmengine.load(_data_root +
'valid/annotations_without_background.json')
json_data['categories'] = ChessPieces
mmengine.dump(json_data,
_data_root + 'valid/new_annotations_without_background.json')
CottontailRabbits = [{
'id': 1,
'name': 'rabbit',
'supercategory': 'Cottontail-Rabbit'
}]
_data_root = args.data_root + 'CottontailRabbits/'
json_data = mmengine.load(_data_root +
'valid/annotations_without_background.json')
json_data['categories'] = CottontailRabbits
mmengine.dump(json_data,
_data_root + 'valid/new_annotations_without_background.json')
NorthAmericaMushrooms = [{
'id': 1,
'name': 'flat mushroom',
'supercategory': 'mushroom'
}, {
'id': 2,
'name': 'yellow mushroom',
'supercategory': 'mushroom'
}]
_data_root = args.data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa
json_data = mmengine.load(_data_root +
'valid/annotations_without_background.json')
json_data['categories'] = NorthAmericaMushrooms
mmengine.dump(json_data,
_data_root + 'valid/new_annotations_without_background.json')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmengine
def parse_args():
parser = argparse.ArgumentParser(description='Override Category')
parser.add_argument('data_root')
return parser.parse_args()
def main():
args = parse_args()
ChessPieces = [{
'id': 1,
'name': ' ',
'supercategory': 'pieces'
}, {
'id': 2,
'name': 'black bishop',
'supercategory': 'pieces'
}, {
'id': 3,
'name': 'black king',
'supercategory': 'pieces'
}, {
'id': 4,
'name': 'black knight',
'supercategory': 'pieces'
}, {
'id': 5,
'name': 'black pawn',
'supercategory': 'pieces'
}, {
'id': 6,
'name': 'black queen',
'supercategory': 'pieces'
}, {
'id': 7,
'name': 'black rook',
'supercategory': 'pieces'
}, {
'id': 8,
'name': 'white bishop',
'supercategory': 'pieces'
}, {
'id': 9,
'name': 'white king',
'supercategory': 'pieces'
}, {
'id': 10,
'name': 'white knight',
'supercategory': 'pieces'
}, {
'id': 11,
'name': 'white pawn',
'supercategory': 'pieces'
}, {
'id': 12,
'name': 'white queen',
'supercategory': 'pieces'
}, {
'id': 13,
'name': 'white rook',
'supercategory': 'pieces'
}]
_data_root = args.data_root + 'ChessPieces/Chess Pieces.v23-raw.coco/'
json_data = mmengine.load(_data_root +
'valid/annotations_without_background.json')
json_data['categories'] = ChessPieces
mmengine.dump(json_data,
_data_root + 'valid/new_annotations_without_background.json')
NorthAmericaMushrooms = [{
'id': 1,
'name': 'flat mushroom',
'supercategory': 'mushroom'
}, {
'id': 2,
'name': 'yellow mushroom',
'supercategory': 'mushroom'
}]
_data_root = args.data_root + 'NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/' # noqa
json_data = mmengine.load(_data_root +
'valid/annotations_without_background.json')
json_data['categories'] = NorthAmericaMushrooms
mmengine.dump(json_data,
_data_root + 'valid/new_annotations_without_background.json')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseEvaluator(metaclass=ABCMeta):
"""Base class for an evaluator.
The evaluator first processes each batch of data_samples and
    predictions, and appends the processed results into the results list.
Then it collects all results together from all ranks if distributed
training is used. Finally, it computes the metrics of the entire dataset.
    A subclass of :class:`BaseEvaluator` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in evaluator class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[Tuple[Any, BaseDataElement]],
predictions: Sequence[BaseDataElement]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, BaseDataElement]]): A batch of data
from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
                ``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
                f'{self.__class__.__name__} got empty `self.results`. Please '
                'ensure that the processed results are properly added into '
                '`self.results` in the `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
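# Hedged sketch: a minimal concrete evaluator built on the base class above.
# The metric name and the fields read from the data samples are assumptions
# for illustration only.
class AccuracyEvaluator(BaseEvaluator):
    default_prefix = 'demo'
    def process(self, data_batch, predictions):
        for (_, data_sample), pred in zip(data_batch, predictions):
            self.results.append(
                int(pred.get('label') == data_sample.get('label')))
    def compute_metrics(self, results):
        return dict(accuracy=sum(results) / max(len(results), 1))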
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseEvaluator(metaclass=ABCMeta):
"""Base class for an evaluator.
The evaluator first processes each batch of data_samples and
    predictions, and appends the processed results into the results list.
Then it collects all results together from all ranks if distributed
training is used. Finally, it computes the metrics of the entire dataset.
    A subclass of :class:`BaseEvaluator` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in evaluator class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[Tuple[Any, BaseDataSample]],
predictions: Sequence[BaseDataSample]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, BaseDataSample]]): A batch of data
from the dataloader.
predictions (Sequence[BaseDataSample]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
                ``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
                f'{self.__class__.__name__} got empty `self.results`. Please '
                'ensure that the processed results are properly added into '
                '`self.results` in the `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
Load the data from the url into an AudioNdArray.
---
```python
from typing import Optional
from docarray import BaseDoc
import numpy as np
from docarray.typing import AudioUrl, AudioNdArray
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
```
---
:return: AudioNdArray representing the audio file content.
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
Load the data from the url into an AudioNdArray.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
import numpy as np
            from docarray.typing import AudioUrl, AudioNdArray
            class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor, doc.frame_rate = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
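# Hedged sketch: when swapping in another timm backbone, the neck's
# `in_channels` above must match the backbone's feature channels, which can be
# looked up with timm itself (assuming timm is installed):
#
#   import timm
#   m = timm.create_model(
#       'efficientnet_b1', features_only=True, out_indices=(1, 2, 3, 4))
#   print(m.feature_info.channels())  # expected: [24, 40, 112, 320]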
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=0.20.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.base_doc.any_doc import AnyDoc
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
__all__ = ['AnyDoc', 'BaseDoc', 'BaseNode']
def __getattr__(name: str):
if name == 'DocArrayResponse':
import_library('fastapi', raise_error=True)
from docarray.base_doc.docarray_response import DocArrayResponse
if name not in __all__:
__all__.append(name)
return DocArrayResponse
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from docarray.base_doc.any_doc import AnyDoc
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
__all__ = ['AnyDoc', 'BaseDoc', 'BaseNode']
def __getattr__(name: str):
if name == 'DocArrayResponse':
import_library('fastapi', raise_error=True)
from docarray.base_doc.docarray_response import DocArrayResponse
if name not in __all__:
__all__.append(name)
return DocArrayResponse
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
def get_doc_value():
return 'MyExecutorBeforeReload'
|
def get_doc_value():
return 'MyExecutorBeforeReload'
|
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_distributed_shuffle_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 2
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
ds_rank0 = split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle(seed=42)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle()
ds_rank0 = split_dataset_by_node(full_ds.shuffle(seed=42), rank=0, world_size=world_size)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
        (2, 2),  # each node has 2 shards and each worker has 1 shard
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
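# Hedged sketch of the per-rank pattern the tests above exercise: each rank
# keeps a disjoint (or example-skipping) slice of the dataset. The rank and
# world size are placeholders; in practice they come from torch.distributed.
def _example_per_rank_split():
    full_ds = Dataset.from_dict({"i": range(17)})
    rank, world_size = 0, 3  # placeholders
    ds_rank = split_dataset_by_node(full_ds, rank=rank, world_size=world_size)
    return [example["i"] for example in ds_rank]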
|
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
        (2, 2),  # each node has 2 shards and each worker has 1 shard
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
|
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config):
old_environ = dict(os.environ)
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file_content):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=text_file_content.encode(),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
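# Hedged sketch: how a downstream test might consume one of the fixtures
# above. Loading the repo id with `load_dataset` and passing the CI token are
# assumptions about typical usage, not part of this module.
def _example_consumer(hf_private_dataset_repo_txt_data):
    from datasets import load_dataset
    ds = load_dataset(
        hf_private_dataset_repo_txt_data, token=CI_HUB_USER_TOKEN, split="train")
    assert ds.num_rows > 0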
|
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config):
old_environ = dict(os.environ)
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmcv.runner import ModuleList, auto_fp16
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsampling it.
Args:
num_convs (int): Number of conv layers in the head. Default: 0.
num_fcs (int): Number of fc layers in the head. Default: 2.
fc_out_channels (int): Number of output channels of fc layer.
Default: 1024.
        downsample_factor (int): The factor by which the feature map is
            downsampled. Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=0,
num_fcs=2,
fc_out_channels=1024,
downsample_factor=2,
init_cfg=dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg):
super(CoarseMaskHead, self).__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
        # remove conv_logits
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self):
super(FCNMaskHead, self).init_weights()
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
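# Hedged sketch: with PointRend-style defaults (in_channels=256,
# roi_feat_size=14, num_classes=80, downsample_factor=2; the inherited
# FCNMaskHead arguments here are assumptions), the head maps RoI features of
# shape (N, 256, 14, 14) to coarse mask logits of shape (N, 80, 7, 7):
#
#   import torch
#   head = CoarseMaskHead(num_convs=0, in_channels=256, conv_out_channels=256,
#                         roi_feat_size=14, num_classes=80)
#   logits = head(torch.rand(2, 256, 14, 14))  # torch.Size([2, 80, 7, 7])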
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmcv.runner import ModuleList, auto_fp16
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsampling it.
Args:
num_convs (int): Number of conv layers in the head. Default: 0.
num_fcs (int): Number of fc layers in the head. Default: 2.
fc_out_channels (int): Number of output channels of fc layer.
Default: 1024.
        downsample_factor (int): The factor by which the feature map is
            downsampled. Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=0,
num_fcs=2,
fc_out_channels=1024,
downsample_factor=2,
init_cfg=dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg):
super(CoarseMaskHead, self).__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
        # remove conv_logits
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self):
super(FCNMaskHead, self).init_weights()
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
"""
Develop installable templates.
"""
import re
import shutil
import subprocess
from pathlib import Path
from typing import Annotated, Optional
import typer
from langchain_cli.utils.packages import get_langserve_export, get_package_root
package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
name: Annotated[str, typer.Argument(help="The name of the folder to create")],
with_poetry: Annotated[
bool,
typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
] = False,
):
"""
Creates a new template package.
"""
computed_name = name if name != "." else Path.cwd().name
destination_dir = Path.cwd() / name if name != "." else Path.cwd()
# copy over template from ../package_template
project_template_dir = Path(__file__).parents[1] / "package_template"
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name == ".")
package_name_split = computed_name.split("/")
package_name = (
package_name_split[-2]
if len(package_name_split) > 1 and package_name_split[-1] == ""
else package_name_split[-1]
)
module_name = re.sub(
r"[^a-zA-Z0-9_]",
"_",
package_name,
)
# generate app route code
chain_name = f"{module_name}_chain"
app_route_code = (
f"from {module_name} import chain as {chain_name}\n\n"
f'add_routes(app, {chain_name}, path="/{package_name}")'
)
# replace template strings
pyproject = destination_dir / "pyproject.toml"
pyproject_contents = pyproject.read_text()
pyproject.write_text(
pyproject_contents.replace("__package_name__", package_name).replace(
"__module_name__", module_name
)
)
# move module folder
package_dir = destination_dir / module_name
shutil.move(destination_dir / "package_template", package_dir)
# update init
init = package_dir / "__init__.py"
init_contents = init.read_text()
init.write_text(init_contents.replace("__module_name__", module_name))
# replace readme
readme = destination_dir / "README.md"
readme_contents = readme.read_text()
readme.write_text(
readme_contents.replace("__package_name__", package_name).replace(
"__app_route_code__", app_route_code
)
)
# poetry install
if with_poetry:
subprocess.run(["poetry", "install"], cwd=destination_dir)
@package_cli.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
configurable: Annotated[
Optional[bool],
typer.Option(
"--configurable/--no-configurable",
help="Whether to include a configurable route",
),
] = None, # defaults to `not chat_playground`
chat_playground: Annotated[
bool,
typer.Option(
"--chat-playground/--no-chat-playground",
help="Whether to include a chat playground route",
),
] = False,
) -> None:
"""
Starts a demo app for this template.
"""
# load pyproject.toml
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
# get langserve export - throws KeyError if invalid
get_langserve_export(pyproject)
host_str = host if host is not None else "127.0.0.1"
script = (
"langchain_cli.dev_scripts:create_demo_server_chat"
if chat_playground
else (
"langchain_cli.dev_scripts:create_demo_server_configurable"
if configurable
else "langchain_cli.dev_scripts:create_demo_server"
)
)
import uvicorn
uvicorn.run(
script,
factory=True,
reload=True,
port=port if port is not None else 8000,
host=host_str,
)
@package_cli.command()
def list(contains: Annotated[Optional[str], typer.Argument()] = None) -> None:
"""
List all or search for available templates.
"""
from langchain_cli.utils.github import list_packages
packages = list_packages(contains=contains)
for package in packages:
typer.echo(package)
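# Hedged sketch: the commands above can also be exercised programmatically via
# typer's test runner; the template name is a placeholder.
def _example_cli_invocation() -> int:
    from typer.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(package_cli, ["new", "my-template", "--no-poetry"])
    return result.exit_code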
|
"""
Develop installable templates.
"""
import re
import shutil
import subprocess
from pathlib import Path
from typing import Optional
import typer
from typing_extensions import Annotated
from langchain_cli.utils.packages import get_langserve_export, get_package_root
package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
name: Annotated[str, typer.Argument(help="The name of the folder to create")],
with_poetry: Annotated[
bool,
typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
] = False,
):
"""
Creates a new template package.
"""
computed_name = name if name != "." else Path.cwd().name
destination_dir = Path.cwd() / name if name != "." else Path.cwd()
# copy over template from ../package_template
project_template_dir = Path(__file__).parents[1] / "package_template"
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name == ".")
package_name_split = computed_name.split("/")
package_name = (
package_name_split[-2]
if len(package_name_split) > 1 and package_name_split[-1] == ""
else package_name_split[-1]
)
module_name = re.sub(
r"[^a-zA-Z0-9_]",
"_",
package_name,
)
# generate app route code
chain_name = f"{module_name}_chain"
app_route_code = (
f"from {module_name} import chain as {chain_name}\n\n"
f'add_routes(app, {chain_name}, path="/{package_name}")'
)
# replace template strings
pyproject = destination_dir / "pyproject.toml"
pyproject_contents = pyproject.read_text()
pyproject.write_text(
pyproject_contents.replace("__package_name__", package_name).replace(
"__module_name__", module_name
)
)
# move module folder
package_dir = destination_dir / module_name
shutil.move(destination_dir / "package_template", package_dir)
# update init
init = package_dir / "__init__.py"
init_contents = init.read_text()
init.write_text(init_contents.replace("__module_name__", module_name))
# replace readme
readme = destination_dir / "README.md"
readme_contents = readme.read_text()
readme.write_text(
readme_contents.replace("__package_name__", package_name).replace(
"__app_route_code__", app_route_code
)
)
# poetry install
if with_poetry:
subprocess.run(["poetry", "install"], cwd=destination_dir)
@package_cli.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
configurable: Annotated[
Optional[bool],
typer.Option(
"--configurable/--no-configurable",
help="Whether to include a configurable route",
),
] = None, # defaults to `not chat_playground`
chat_playground: Annotated[
bool,
typer.Option(
"--chat-playground/--no-chat-playground",
help="Whether to include a chat playground route",
),
] = False,
) -> None:
"""
Starts a demo app for this template.
"""
# load pyproject.toml
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
# get langserve export - throws KeyError if invalid
get_langserve_export(pyproject)
host_str = host if host is not None else "127.0.0.1"
script = (
"langchain_cli.dev_scripts:create_demo_server_chat"
if chat_playground
else (
"langchain_cli.dev_scripts:create_demo_server_configurable"
if configurable
else "langchain_cli.dev_scripts:create_demo_server"
)
)
import uvicorn
uvicorn.run(
script,
factory=True,
reload=True,
port=port if port is not None else 8000,
host=host_str,
)
@package_cli.command()
def list(contains: Annotated[Optional[str], typer.Argument()] = None) -> None:
"""
List all or search for available templates.
"""
from langchain_cli.utils.github import list_packages
packages = list_packages(contains=contains)
for package in packages:
typer.echo(package)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dod_metric import DODCocoMetric
from .dump_det_results import DumpDetResults
from .dump_odvg_results import DumpODVGResults
from .dump_proposals_metric import DumpProposals
from .flickr30k_metric import Flickr30kMetric
from .grefcoco_metric import gRefCOCOMetric
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .refexp_metric import RefExpMetric
from .refseg_metric import RefSegMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric', 'RefSegMetric', 'RefExpMetric',
'gRefCOCOMetric', 'DODCocoMetric', 'DumpODVGResults', 'Flickr30kMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .refseg_metric import RefSegMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric', 'RefSegMetric'
]
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
        from docarray.documents import VideoDoc
# use it directly
        vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
        class MyVideo(VideoDoc):
            name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
        video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
            video: VideoDoc
            text: TextDoc
mmdoc = MultiModalDoc(
            video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
            text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
        from docarray.documents import VideoDoc
# use it directly
        vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
        class MyVideo(VideoDoc):
            name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
        video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
            video: VideoDoc
            text: TextDoc
mmdoc = MultiModalDoc(
            video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
            text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Image(TVTensor):
""":class:`torch.Tensor` subclass for images.
.. note::
In the :ref:`transforms <transforms>`, ``Image`` instances are largely
interchangeable with pure :class:`torch.Tensor`. See
:ref:`this note <passthrough_heuristic>` for more details.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Image(TVTensor):
"""[BETA] :class:`torch.Tensor` subclass for images.
.. note::
In the :ref:`transforms <transforms>`, ``Image`` instances are largely
interchangeable with pure :class:`torch.Tensor`. See
:ref:`this note <passthrough_heuristic>` for more details.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
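# Hedged usage sketch: per ``__new__`` above, a plain 2-D tensor is promoted to a
# CHW image with a singleton channel dimension.
def _image_promotion_demo() -> None:
    img = Image(torch.rand(16, 16))
    assert img.shape == (1, 16, 16)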
|
import importlib
from types import ModuleType
import pytest
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="test_module",
params=[
"app_b.test_main",
pytest.param("app_b_py310.test_main", marks=needs_py310),
"app_b_an.test_main",
pytest.param("app_b_an_py39.test_main", marks=needs_py39),
pytest.param("app_b_an_py310.test_main", marks=needs_py310),
],
)
def get_test_module(request: pytest.FixtureRequest) -> ModuleType:
mod: ModuleType = importlib.import_module(f"docs_src.app_testing.{request.param}")
return mod
def test_app(test_module: ModuleType):
test_main = test_module
test_main.test_create_existing_item()
test_main.test_create_item()
test_main.test_create_item_bad_token()
test_main.test_read_nonexistent_item()
test_main.test_read_item()
test_main.test_read_item_bad_token()
|
from docs_src.app_testing.app_b import test_main
def test_app():
test_main.test_create_existing_item()
test_main.test_create_item()
test_main.test_create_item_bad_token()
test_main.test_read_nonexistent_item()
test_main.test_read_item()
test_main.test_read_item_bad_token()
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
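# Hedged usage sketch ("imdb" is just a small public dataset used as an example):
# the re-exports above make the main entry points importable from the package root.
# from datasets import load_dataset
# ds = load_dataset("imdb", split="train")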
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
import os # type: ignore[import-not-found]
from exa_py import Exa
from langchain_core.utils import convert_to_secret_str
def initialize_client(values: dict) -> dict:
"""Initialize the client."""
exa_api_key = values.get("exa_api_key") or os.environ.get("EXA_API_KEY") or ""
values["exa_api_key"] = convert_to_secret_str(exa_api_key)
args = {
"api_key": values["exa_api_key"].get_secret_value(),
}
if values.get("exa_base_url"):
args["base_url"] = values["exa_base_url"]
values["client"] = Exa(**args)
return values
|
import os # type: ignore[import-not-found]
from exa_py import Exa # type: ignore
from langchain_core.utils import convert_to_secret_str
def initialize_client(values: dict) -> dict:
"""Initialize the client."""
exa_api_key = values.get("exa_api_key") or os.environ.get("EXA_API_KEY") or ""
values["exa_api_key"] = convert_to_secret_str(exa_api_key)
args = {
"api_key": values["exa_api_key"].get_secret_value(),
}
if values.get("exa_base_url"):
args["base_url"] = values["exa_base_url"]
values["client"] = Exa(**args)
return values
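# Hedged usage sketch ("dummy-key" is a placeholder, not a real credential):
# ``initialize_client`` reads the key from the dict or from EXA_API_KEY and attaches
# a configured Exa client under the "client" key.
def _initialize_client_demo():
    values = initialize_client({"exa_api_key": "dummy-key"})
    return values["client"]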
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOV3(SingleStageDetector):
r"""Implementation of `Yolov3: An incremental improvement
<https://arxiv.org/abs/1804.02767>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of YOLOV3. Default: None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of YOLOV3. Default: None.
data_preprocessor (:obj:`ConfigDict` or dict, optional):
Model preprocessing config for processing the input data.
it usually includes ``to_rgb``, ``pad_size_divisor``,
``pad_value``, ``mean`` and ``std``. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from mmdet.core.utils.typing import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOV3(SingleStageDetector):
r"""Implementation of `Yolov3: An incremental improvement
<https://arxiv.org/abs/1804.02767>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of YOLOV3. Default: None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of YOLOV3. Default: None.
data_preprocessor (:obj:`ConfigDict` or dict, optional):
Model preprocessing config for processing the input data.
it usually includes ``to_rgb``, ``pad_size_divisor``,
``pad_value``, ``mean`` and ``std``. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
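# Hedged sketch: since YOLOV3 is registered above, configs resolve it by name through
# the MODELS registry. The nested backbone/neck/head dicts below are abbreviated
# placeholders; a complete config (e.g. the yolov3_d53 COCO config) also defines the
# anchor generator, box coder and losses for the head.
def _example_yolov3_cfg() -> dict:
    assert MODELS.get('YOLOV3') is YOLOV3
    return dict(
        type='YOLOV3',
        backbone=dict(type='Darknet', depth=53, out_indices=(3, 4, 5)),
        neck=dict(
            type='YOLOV3Neck',
            num_scales=3,
            in_channels=[1024, 512, 256],
            out_channels=[512, 256, 128]),
        bbox_head=dict(
            type='YOLOV3Head',
            num_classes=80,
            in_channels=[512, 256, 128],
            out_channels=[1024, 512, 256]))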
|
import os
import torch
import torchaudio.prototype.transforms as T
import torchaudio.transforms as transforms
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
def assert_batch_consistency(self, transform, batch, *args, atol=1e-8, rtol=1e-5, seed=42, **kwargs):
n = batch.size(0)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = batch.clone()
items_result = torch.stack([transform(items_input[i], *args, **kwargs) for i in range(n)])
# Batch the input and run
torch.random.manual_seed(seed)
batch_input = batch.clone()
batch_result = transform(batch_input, *args, **kwargs)
self.assertEqual(items_input, batch_input, rtol=rtol, atol=atol)
self.assertEqual(items_result, batch_result, rtol=rtol, atol=atol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode)
actual = convolve(x, y)
expected = torch.stack(
[
torch.stack(
[convolve(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0)).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_batch_BarkScale(self):
specgram = torch.randn(3, 2, 201, 256)
atol = 1e-6 if os.name == "nt" else 1e-8
transform = T.BarkScale()
self.assert_batch_consistency(transform, specgram, atol=atol)
def test_batch_InverseBarkScale(self):
n_barks = 32
n_stft = 5
bark_spec = torch.randn(3, 2, n_barks, 32) ** 2
        transform = T.InverseBarkScale(n_stft, n_barks)
# Because InverseBarkScale runs SGD on randomly initialized values so they do not yield
# exactly same result. For this reason, tolerance is very relaxed here.
self.assert_batch_consistency(transform, bark_spec, atol=1.0, rtol=1e-5)
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode)
actual = convolve(x, y)
expected = torch.stack(
[
torch.stack(
[convolve(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0)).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
def __init__(self, features=None, **tf_tensor_kwargs):
super().__init__(features=features)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
# only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import tensorflow as tf
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
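# Hedged sketch of the consolidation rule in ``_consolidate`` above: tensors with
# identical shape and dtype are stacked into a single tensor, while 1-D tensors of
# differing lengths fall back to a ragged stack.
def _consolidation_demo() -> None:
    import tensorflow as tf
    same = [tf.constant([1, 2]), tf.constant([3, 4])]
    ragged = [tf.constant([1]), tf.constant([2, 3])]
    assert tf.stack(same).shape == (2, 2)
    assert isinstance(tf.ragged.stack(ragged), tf.RaggedTensor)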
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(Formatter[Mapping, "tf.Tensor", Mapping]):
def __init__(self, features=None, **tf_tensor_kwargs):
super().__init__(features=features)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
# only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import WanTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
enable_full_determinism()
class WanTransformer3DTests(ModelTesterMixin, TorchCompileTesterMixin, unittest.TestCase):
model_class = WanTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 1
num_channels = 4
num_frames = 2
height = 16
width = 16
text_encoder_embedding_dim = 16
sequence_length = 12
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 1, 16, 16)
@property
def output_shape(self):
return (4, 1, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": (1, 2, 2),
"num_attention_heads": 2,
"attention_head_dim": 12,
"in_channels": 4,
"out_channels": 4,
"text_dim": 16,
"freq_dim": 256,
"ffn_dim": 32,
"num_layers": 2,
"cross_attn_norm": True,
"qk_norm": "rms_norm_across_heads",
"rope_max_seq_len": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"WanTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import WanTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
is_torch_compile,
require_torch_2,
require_torch_gpu,
slow,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class WanTransformer3DTests(ModelTesterMixin, unittest.TestCase):
model_class = WanTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 1
num_channels = 4
num_frames = 2
height = 16
width = 16
text_encoder_embedding_dim = 16
sequence_length = 12
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 1, 16, 16)
@property
def output_shape(self):
return (4, 1, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": (1, 2, 2),
"num_attention_heads": 2,
"attention_head_dim": 12,
"in_channels": 4,
"out_channels": 4,
"text_dim": 16,
"freq_dim": 256,
"ffn_dim": 32,
"num_layers": 2,
"cross_attn_norm": True,
"qk_norm": "rms_norm_across_heads",
"rope_max_seq_len": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"WanTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@require_torch_gpu
@require_torch_2
@is_torch_compile
@slow
def test_torch_compile_recompilation_and_graph_break(self):
torch._dynamo.reset()
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict).to(torch_device)
model = torch.compile(model, fullgraph=True)
with torch._dynamo.config.patch(error_on_recompile=True), torch.no_grad():
_ = model(**inputs_dict)
_ = model(**inputs_dict)
|
# -*- coding: utf-8 -*-
"""
Audio Datasets
==============
**Author**: `Moto Hira <[email protected]>`__
``torchaudio`` provides easy access to common, publicly accessible
datasets. Please refer to the official documentation for the list of
available datasets.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no")
os.makedirs(YESNO_DATASET_PATH, exist_ok=True)
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, _ = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, _ = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
######################################################################
# Here, we show how to use the
# :py:class:`torchaudio.datasets.YESNO` dataset.
#
dataset = torchaudio.datasets.YESNO(YESNO_DATASET_PATH, download=True)
for i in [1, 3, 5]:
waveform, sample_rate, label = dataset[i]
plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
play_audio(waveform, sample_rate)
|
# -*- coding: utf-8 -*-
"""
Audio Datasets
==============
``torchaudio`` provides easy access to common, publicly accessible
datasets. Please refer to the official documentation for the list of
available datasets.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no")
os.makedirs(YESNO_DATASET_PATH, exist_ok=True)
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, _ = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, _ = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
######################################################################
# Here, we show how to use the
# :py:class:`torchaudio.datasets.YESNO` dataset.
#
dataset = torchaudio.datasets.YESNO(YESNO_DATASET_PATH, download=True)
for i in [1, 3, 5]:
waveform, sample_rate, label = dataset[i]
plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
play_audio(waveform, sample_rate)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_auto_round_available, is_torch_available, logging
from ..utils.quantization_config import QuantizationConfigMixin
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class AutoRoundQuantizer(HfQuantizer):
"""
Quantizer of the AutoRound method. (https://huggingface.co/papers/2309.05516)
"""
# AutoRound requires data calibration - we support only inference
requires_calibration = True
required_packages = ["auto_round"]
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
def validate_environment(self, *args, **kwargs):
self.device_map = kwargs.get("device_map", None)
if not is_auto_round_available():
raise ImportError(
"Loading an AutoRound quantized model requires auto-round library (`pip install 'auto-round>=0.5'`)"
)
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
torch_dtype = torch.bfloat16
logger.info("Loading the model in `torch.bfloat16`. To overwrite it, set `torch_dtype` manually.")
return torch_dtype
def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
if model.__class__.main_input_name != "input_ids":
logger.warning("AutoRound offers only limited support for models that are not strictly text-based.")
from auto_round.inference.convert_model import convert_hf_model, infer_target_device
if self.pre_quantized:
target_device = infer_target_device(self.device_map)
model, used_backends = convert_hf_model(model, target_device)
self.used_backends = used_backends
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
if self.pre_quantized:
from auto_round.inference.convert_model import post_init
post_init(model, self.used_backends)
else:
raise ValueError("AutoRound only sports pre-quantized models.")
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
return False
def is_serializable(self, safe_serialization=None):
## for gptq/awq models, the quantization config will be changed
return True
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_auto_round_available, is_torch_available, logging
from ..utils.quantization_config import QuantizationConfigMixin
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class AutoRoundQuantizer(HfQuantizer):
"""
Quantizer of the AutoRound method. (https://arxiv.org/pdf/2309.05516)
"""
# AutoRound requires data calibration - we support only inference
requires_calibration = True
required_packages = ["auto_round"]
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
def validate_environment(self, *args, **kwargs):
self.device_map = kwargs.get("device_map", None)
if not is_auto_round_available():
raise ImportError(
"Loading an AutoRound quantized model requires auto-round library (`pip install 'auto-round>=0.5'`)"
)
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
torch_dtype = torch.bfloat16
logger.info("Loading the model in `torch.bfloat16`. To overwrite it, set `torch_dtype` manually.")
return torch_dtype
def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
if model.__class__.main_input_name != "input_ids":
logger.warning("AutoRound offers only limited support for models that are not strictly text-based.")
from auto_round.inference.convert_model import convert_hf_model, infer_target_device
if self.pre_quantized:
target_device = infer_target_device(self.device_map)
model, used_backends = convert_hf_model(model, target_device)
self.used_backends = used_backends
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
if self.pre_quantized:
from auto_round.inference.convert_model import post_init
post_init(model, self.used_backends)
else:
raise ValueError("AutoRound only sports pre-quantized models.")
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
return False
def is_serializable(self, safe_serialization=None):
## for gptq/awq models, the quantization config will be changed
return True
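# Hedged usage sketch (the repo id below is a placeholder, not a real checkpoint):
# because ``requires_calibration`` is True, this quantizer only handles checkpoints
# that were already quantized with AutoRound, and loading goes through the usual
# ``from_pretrained`` path.
# from transformers import AutoModelForCausalLM
# model = AutoModelForCausalLM.from_pretrained("org/model-autoround-int4", device_map="auto")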
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for MGT-STR CHAR."""
import json
import os
from typing import Optional
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
class MgpstrTokenizer(PreTrainedTokenizer):
"""
Construct a MGP-STR char tokenizer.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
unk_token (`str`, *optional*, defaults to `"[GO]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"[GO]"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[s]"`):
The end of sequence token.
pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"[GO]"`):
A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
attention mechanisms or loss computation.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.vocab = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.vocab.items()}
super().__init__(
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
**kwargs,
)
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
vocab = dict(self.vocab).copy()
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
"""Tokenize a string."""
char_tokens = []
for s in text:
char_tokens.extend(s)
return char_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
return (vocab_file,)
__all__ = ["MgpstrTokenizer"]
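# Hedged usage sketch ("vocab.json" is a placeholder path to a char-level vocab file):
# per ``_tokenize`` above, every character of the input becomes its own token, and
# characters missing from the vocab fall back to the "[GO]" (unk) id.
def _char_tokenization_demo(vocab_path: str = "vocab.json") -> None:
    tok = MgpstrTokenizer(vocab_file=vocab_path)
    ids = tok.convert_tokens_to_ids(tok._tokenize("ticket"))
    assert len(ids) == len("ticket")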
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for MGT-STR CHAR."""
import json
import os
from typing import Optional
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
class MgpstrTokenizer(PreTrainedTokenizer):
"""
Construct a MGP-STR char tokenizer.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
unk_token (`str`, *optional*, defaults to `"[GO]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"[GO]"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[s]"`):
The end of sequence token.
pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"[GO]"`):
A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
attention mechanisms or loss computation.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.vocab = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.vocab.items()}
super().__init__(
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
**kwargs,
)
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
vocab = dict(self.vocab).copy()
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
"""Tokenize a string."""
char_tokens = []
for s in text:
char_tokens.extend(s)
return char_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
return (vocab_file,)
__all__ = ["MgpstrTokenizer"]
|
"""Query Understanding agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.tools.types import BaseTool
from llama_index.llms.openai import OpenAI
from .step import QueryUnderstandingAgentWorker
class QueryUnderstandingAgentPack(BaseLlamaPack):
"""
    QueryUnderstandingAgent pack.
Args:
tools (List[BaseTool]): List of tools to use.
llm (Optional[LLM]): LLM to use.
"""
def __init__(
self,
tools: List[BaseTool],
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
agent_worker_kwargs: Optional[Dict[str, Any]] = None,
agent_runner_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Init params."""
self.llm = llm or OpenAI(model="gpt-4")
self.callback_manager = callback_manager or self.llm.callback_manager
self.agent_worker = QueryUnderstandingAgentWorker.from_tools(
tools,
llm=llm,
verbose=True,
callback_manager=self.callback_manager,
**(agent_worker_kwargs or {}),
)
self.agent = AgentRunner(
self.agent_worker,
callback_manager=self.callback_manager,
**(agent_runner_kwargs or {}),
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"callback_manager": self.callback_manager,
"agent_worker": self.agent_worker,
"agent": self.agent,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
|
"""Query Understanding agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.tools.types import BaseTool
from llama_index.llms.openai import OpenAI
from .step import QueryUnderstandingAgentWorker
class QueryUnderstandingAgentPack(BaseLlamaPack):
"""
    QueryUnderstandingAgent pack.
Args:
tools (List[BaseTool]): List of tools to use.
llm (Optional[LLM]): LLM to use.
"""
def __init__(
self,
tools: List[BaseTool],
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
agent_worker_kwargs: Optional[Dict[str, Any]] = None,
agent_runner_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Init params."""
self.llm = llm or OpenAI(model="gpt-4")
self.callback_manager = callback_manager or self.llm.callback_manager
self.agent_worker = QueryUnderstandingAgentWorker.from_tools(
tools,
llm=llm,
verbose=True,
callback_manager=self.callback_manager,
**(agent_worker_kwargs or {})
)
self.agent = AgentRunner(
self.agent_worker,
callback_manager=self.callback_manager,
**(agent_runner_kwargs or {})
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"callback_manager": self.callback_manager,
"agent_worker": self.agent_worker,
"agent": self.agent,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
|
"""Helpers for interfacing array like objects."""
import copy
import ctypes
import json
from typing import Literal, Optional, Protocol, Tuple, Type, TypedDict, Union, cast
import numpy as np
from ._typing import CNumericPtr, DataType, NumpyOrCupy
from .compat import import_cupy
class _ArrayLikeArg(Protocol):
@property
def __array_interface__(self) -> "ArrayInf": ...
class TransformedDf(Protocol):
"""Protocol class for storing transformed dataframe."""
def array_interface(self) -> bytes:
"""Get a JSON-encoded list of array interfaces."""
@property
def shape(self) -> Tuple[int, int]:
"""Return the shape of the dataframe."""
ArrayInf = TypedDict(
"ArrayInf",
{
"data": Tuple[int, bool],
"typestr": str,
"version": Literal[3],
"strides": Optional[Tuple[int, ...]],
"shape": Tuple[int, ...],
"mask": Union["ArrayInf", None, _ArrayLikeArg],
},
)
def array_hasobject(data: DataType) -> bool:
"""Whether the numpy array has object dtype."""
return hasattr(data.dtype, "hasobject") and data.dtype.hasobject
def cuda_array_interface(data: DataType) -> bytes:
"""Make cuda array interface str."""
if array_hasobject(data):
raise ValueError("Input data contains `object` dtype. Expecting numeric data.")
interface = data.__cuda_array_interface__
if "mask" in interface:
interface["mask"] = interface["mask"].__cuda_array_interface__
interface_str = bytes(json.dumps(interface), "utf-8")
return interface_str
def from_array_interface(interface: ArrayInf, zero_copy: bool = False) -> NumpyOrCupy:
"""Convert array interface to numpy or cupy array"""
class Array:
"""Wrapper type for communicating with numpy and cupy."""
_interface: Optional[ArrayInf] = None
@property
def __array_interface__(self) -> Optional[ArrayInf]:
return self._interface
@__array_interface__.setter
def __array_interface__(self, interface: ArrayInf) -> None:
self._interface = copy.copy(interface)
# Convert some fields to tuple as required by numpy
self._interface["shape"] = tuple(self._interface["shape"])
self._interface["data"] = (
self._interface["data"][0],
self._interface["data"][1],
)
strides = self._interface.get("strides", None)
if strides is not None:
self._interface["strides"] = tuple(strides)
@property
def __cuda_array_interface__(self) -> Optional[ArrayInf]:
return self.__array_interface__
@__cuda_array_interface__.setter
def __cuda_array_interface__(self, interface: ArrayInf) -> None:
self.__array_interface__ = interface
arr = Array()
if "stream" in interface:
# CUDA stream is presented, this is a __cuda_array_interface__.
arr.__cuda_array_interface__ = interface
out = import_cupy().array(arr, copy=not zero_copy)
else:
arr.__array_interface__ = interface
out = np.array(arr, copy=not zero_copy)
return out
def make_array_interface(
ptr: Union[CNumericPtr, int],
shape: Tuple[int, ...],
dtype: Type[np.number],
is_cuda: bool,
) -> ArrayInf:
"""Make an __(cuda)_array_interface__ from a pointer."""
# Use an empty array to handle typestr and descr
if is_cuda:
empty = import_cupy().empty(shape=(0,), dtype=dtype)
array = empty.__cuda_array_interface__ # pylint: disable=no-member
else:
empty = np.empty(shape=(0,), dtype=dtype)
array = empty.__array_interface__ # pylint: disable=no-member
if not isinstance(ptr, int):
addr = ctypes.cast(ptr, ctypes.c_void_p).value
else:
addr = ptr
length = int(np.prod(shape))
# Handle empty dataset.
assert addr is not None or length == 0
if addr is None:
return array
array["data"] = (addr, True)
if is_cuda:
array["stream"] = 2
array["shape"] = shape
array["strides"] = None
return array
def array_interface_dict(data: np.ndarray) -> ArrayInf:
"""Convert array interface into a Python dictionary."""
if array_hasobject(data):
raise ValueError("Input data contains `object` dtype. Expecting numeric data.")
arrinf = data.__array_interface__
if "mask" in arrinf:
arrinf["mask"] = arrinf["mask"].__array_interface__
return cast(ArrayInf, arrinf)
def array_interface(data: np.ndarray) -> bytes:
"""Make array interface str."""
interface = array_interface_dict(data)
interface_str = bytes(json.dumps(interface), "utf-8")
return interface_str
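# Hedged sketch: round-trip a small contiguous numpy array through the helpers above.
def _array_interface_roundtrip_demo() -> None:
    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    inf = array_interface_dict(arr)
    restored = from_array_interface(inf, zero_copy=True)
    assert np.array_equal(restored, arr)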
|
"""Helpers for interfacing array like objects."""
import copy
import ctypes
import json
from typing import Literal, Optional, Protocol, Tuple, Type, TypedDict, Union, cast
import numpy as np
from ._typing import CNumericPtr, DataType, NumpyOrCupy
from .compat import import_cupy
class _ArrayLikeArg(Protocol):
@property
def __array_interface__(self) -> "ArrayInf": ...
ArrayInf = TypedDict(
"ArrayInf",
{
"data": Tuple[int, bool],
"typestr": str,
"version": Literal[3],
"strides": Optional[Tuple[int, ...]],
"shape": Tuple[int, ...],
"mask": Union["ArrayInf", None, _ArrayLikeArg],
},
)
def array_hasobject(data: DataType) -> bool:
"""Whether the numpy array has object dtype."""
return hasattr(data.dtype, "hasobject") and data.dtype.hasobject
def cuda_array_interface(data: DataType) -> bytes:
"""Make cuda array interface str."""
if array_hasobject(data):
raise ValueError("Input data contains `object` dtype. Expecting numeric data.")
interface = data.__cuda_array_interface__
if "mask" in interface:
interface["mask"] = interface["mask"].__cuda_array_interface__
interface_str = bytes(json.dumps(interface), "utf-8")
return interface_str
def from_array_interface(interface: ArrayInf, zero_copy: bool = False) -> NumpyOrCupy:
"""Convert array interface to numpy or cupy array"""
class Array:
"""Wrapper type for communicating with numpy and cupy."""
_interface: Optional[ArrayInf] = None
@property
def __array_interface__(self) -> Optional[ArrayInf]:
return self._interface
@__array_interface__.setter
def __array_interface__(self, interface: ArrayInf) -> None:
self._interface = copy.copy(interface)
# Convert some fields to tuple as required by numpy
self._interface["shape"] = tuple(self._interface["shape"])
self._interface["data"] = (
self._interface["data"][0],
self._interface["data"][1],
)
strides = self._interface.get("strides", None)
if strides is not None:
self._interface["strides"] = tuple(strides)
@property
def __cuda_array_interface__(self) -> Optional[ArrayInf]:
return self.__array_interface__
@__cuda_array_interface__.setter
def __cuda_array_interface__(self, interface: ArrayInf) -> None:
self.__array_interface__ = interface
arr = Array()
if "stream" in interface:
# CUDA stream is presented, this is a __cuda_array_interface__.
arr.__cuda_array_interface__ = interface
out = import_cupy().array(arr, copy=not zero_copy)
else:
arr.__array_interface__ = interface
out = np.array(arr, copy=not zero_copy)
return out
def make_array_interface(
ptr: CNumericPtr, shape: Tuple[int, ...], dtype: Type[np.number], is_cuda: bool
) -> ArrayInf:
"""Make an __(cuda)_array_interface__ from a pointer."""
# Use an empty array to handle typestr and descr
if is_cuda:
empty = import_cupy().empty(shape=(0,), dtype=dtype)
array = empty.__cuda_array_interface__ # pylint: disable=no-member
else:
empty = np.empty(shape=(0,), dtype=dtype)
array = empty.__array_interface__ # pylint: disable=no-member
addr = ctypes.cast(ptr, ctypes.c_void_p).value
length = int(np.prod(shape))
# Handle empty dataset.
assert addr is not None or length == 0
if addr is None:
return array
array["data"] = (addr, True)
if is_cuda:
array["stream"] = 2
array["shape"] = shape
array["strides"] = None
return array
def array_interface_dict(data: np.ndarray) -> ArrayInf:
"""Convert array interface into a Python dictionary."""
if array_hasobject(data):
raise ValueError("Input data contains `object` dtype. Expecting numeric data.")
arrinf = data.__array_interface__
if "mask" in arrinf:
arrinf["mask"] = arrinf["mask"].__array_interface__
return cast(ArrayInf, arrinf)
def array_interface(data: np.ndarray) -> bytes:
"""Make array interface str."""
interface = array_interface_dict(data)
interface_str = bytes(json.dumps(interface), "utf-8")
return interface_str
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.nadam import Nadam
class NadamTest(testing.TestCase):
def test_config(self):
optimizer = Nadam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_build_with_empty_var_list(self):
optimizer = Nadam()
optimizer.build([])
self.assertEqual(optimizer._u_product.dtype, backend.floatx())
def test_single_step(self):
optimizer = Nadam(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.4686, 1.4686, 2.4686, 3.4686], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Nadam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281],
[-0.1738, -0.1731, -0.1726, -0.1723, -0.1721, -0.172, -0.1719, -0.1718, -0.1718, -0.1717],
[-0.7115, -0.7103, -0.7096, -0.7092, -0.709, -0.7088, -0.7086, -0.7085, -0.7085, -0.7084],
[-1.2335, -1.2322, -1.2313, -1.2309, -1.2306, -1.2304, -1.2302, -1.2301, -1.23, -1.2299],
[-1.7492, -1.7478, -1.7469, -1.7464, -1.7461, -1.7459, -1.7457, -1.7456, -1.7455, -1.7454]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Nadam(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Nadam(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
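# Minimal end-user sketch of the Nadam optimizer exercised by the tests above,
# assuming the public `keras` package is importable; the tiny model and data are
# illustrative only, and the guard keeps it out of normal pytest collection.
if __name__ == "__main__":
    import keras

    inputs = keras.Input(shape=(4,))
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Nadam(learning_rate=0.5, beta_1=0.5, beta_2=0.67),
        loss="mse",
    )
    model.fit(np.ones((8, 4)), np.zeros((8, 1)), epochs=1, verbose=0)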
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.nadam import Nadam
class NadamTest(testing.TestCase):
def test_config(self):
optimizer = Nadam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Nadam(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.4686, 1.4686, 2.4686, 3.4686], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Nadam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281],
[-0.1738, -0.1731, -0.1726, -0.1723, -0.1721, -0.172, -0.1719, -0.1718, -0.1718, -0.1717],
[-0.7115, -0.7103, -0.7096, -0.7092, -0.709, -0.7088, -0.7086, -0.7085, -0.7085, -0.7084],
[-1.2335, -1.2322, -1.2313, -1.2309, -1.2306, -1.2304, -1.2302, -1.2301, -1.23, -1.2299],
[-1.7492, -1.7478, -1.7469, -1.7464, -1.7461, -1.7459, -1.7457, -1.7456, -1.7455, -1.7454]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Nadam(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Nadam(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
"""Documents module.
**Document** module is a collection of classes that handle documents
and their transformations.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .base import Document
from .compressor import BaseDocumentCompressor
from .transformers import BaseDocumentTransformer
__all__ = ["Document", "BaseDocumentTransformer", "BaseDocumentCompressor"]
_dynamic_imports = {
"Document": "base",
"BaseDocumentCompressor": "compressor",
"BaseDocumentTransformer": "transformers",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
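# Hedged usage sketch, assuming this file is `langchain_core/documents/__init__.py`
# (the plain-import variant below suggests as much); kept as comments to avoid a
# circular import inside the package itself:
#
#     from langchain_core import documents
#     doc = documents.Document(page_content="hello")  # first access triggers __getattr__
#     assert sorted(dir(documents)) == sorted(documents.__all__)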
|
"""Documents module.
**Document** module is a collection of classes that handle documents
and their transformations.
"""
from langchain_core.documents.base import Document
from langchain_core.documents.compressor import BaseDocumentCompressor
from langchain_core.documents.transformers import BaseDocumentTransformer
__all__ = ["Document", "BaseDocumentTransformer", "BaseDocumentCompressor"]
|
import sys
from absl import logging
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export(
[
"keras.config.enable_interactive_logging",
"keras.utils.enable_interactive_logging",
]
)
def enable_interactive_logging():
"""Turn on interactive logging.
When interactive logging is enabled, Keras displays logs via stdout.
This provides the best experience when using Keras in an interactive
environment such as a shell or a notebook.
"""
global_state.set_global_attribute("interactive_logging", True)
@keras_export(
[
"keras.config.disable_interactive_logging",
"keras.utils.disable_interactive_logging",
]
)
def disable_interactive_logging():
"""Turn off interactive logging.
When interactive logging is disabled, Keras sends logs to `absl.logging`.
This is the best option when using Keras in a non-interactive
way, such as running a training or inference job on a server.
"""
global_state.set_global_attribute("interactive_logging", False)
@keras_export(
[
"keras.config.is_interactive_logging_enabled",
"keras.utils.is_interactive_logging_enabled",
]
)
def is_interactive_logging_enabled():
"""Check if interactive logging is enabled.
To switch between writing logs to stdout and `absl.logging`, you may use
`keras.config.enable_interactive_logging()` and
`keras.config.disable_interactive_logging()`.
Returns:
Boolean, `True` if interactive logging is enabled,
and `False` otherwise.
"""
return global_state.get_global_attribute("interactive_logging", True)
def set_logging_verbosity(level):
"""Sets the verbosity level for logging.
Supported log levels are as follows:
- `"FATAL"` (least verbose)
- `"ERROR"`
- `"WARNING"`
- `"INFO"`
- `"DEBUG"` (most verbose)
Args:
level: A string corresponding to the level of verbosity for logging.
"""
valid_levels = {
"FATAL": logging.FATAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
}
verbosity = valid_levels.get(level)
if verbosity is None:
raise ValueError(
"Please pass a valid level for logging verbosity. "
f"Expected one of: {set(valid_levels.keys())}. "
f"Received: {level}"
)
logging.set_verbosity(verbosity)
def print_msg(message, line_break=True):
"""Print the message to absl logging or stdout."""
message = str(message)
if is_interactive_logging_enabled():
message = message + "\n" if line_break else message
try:
sys.stdout.write(message)
except UnicodeEncodeError:
# If the encoding differs from UTF-8, `sys.stdout.write` may fail.
# To address this, replace special unicode characters in the
# message, and then encode and decode using the target encoding.
message = _replace_special_unicode_character(message)
message_bytes = message.encode(sys.stdout.encoding, errors="ignore")
message = message_bytes.decode(sys.stdout.encoding)
sys.stdout.write(message)
sys.stdout.flush()
else:
logging.info(message)
def ask_to_proceed_with_overwrite(filepath):
"""Produces a prompt asking about overwriting a file.
Args:
filepath: the path to the file to be overwritten.
Returns:
True if we can proceed with overwrite, False otherwise.
"""
overwrite = (
input(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
.strip()
.lower()
)
while overwrite not in ("y", "n"):
overwrite = (
input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
)
if overwrite == "n":
return False
print_msg("[TIP] Next time specify overwrite=True!")
return True
def _replace_special_unicode_character(message):
message = str(message).replace("━", "=") # Fall back to Keras2 behavior.
return message
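# Hedged usage sketch of the toggles defined above; the messages are illustrative only.
if __name__ == "__main__":
    set_logging_verbosity("DEBUG")
    disable_interactive_logging()
    print_msg("routed through absl.logging")  # logging.info path
    enable_interactive_logging()
    print_msg("written to stdout")            # sys.stdout path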
|
import sys
from absl import logging
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export(
[
"keras.config.enable_interactive_logging",
"keras.utils.enable_interactive_logging",
]
)
def enable_interactive_logging():
"""Turn on interactive logging.
When interactive logging is enabled, Keras displays logs via stdout.
This provides the best experience when using Keras in an interactive
environment such as a shell or a notebook.
"""
global_state.set_global_attribute("interactive_logging", True)
@keras_export(
[
"keras.config.disable_interactive_logging",
"keras.utils.disable_interactive_logging",
]
)
def disable_interactive_logging():
"""Turn off interactive logging.
When interactive logging is disabled, Keras sends logs to `absl.logging`.
This is the best option when using Keras in a non-interactive
way, such as running a training or inference job on a server.
"""
global_state.set_global_attribute("interactive_logging", False)
@keras_export(
[
"keras.config.is_interactive_logging_enabled",
"keras.utils.is_interactive_logging_enabled",
]
)
def is_interactive_logging_enabled():
"""Check if interactive logging is enabled.
To switch between writing logs to stdout and `absl.logging`, you may use
`keras.config.enable_interactive_logging()` and
`keras.config.disable_interactive_logging()`.
Returns:
Boolean, `True` if interactive logging is enabled,
and `False` otherwise.
"""
return global_state.get_global_attribute("interactive_logging", True)
def set_logging_verbosity(level):
"""Sets the verbosity level for logging.
Supported log levels are as follows:
- `"FATAL"` (least verbose)
- `"ERROR"`
- `"WARNING"`
- `"INFO"`
- `"DEBUG"` (most verbose)
Args:
level: A string corresponding to the level of verbosity for logging.
"""
valid_levels = {
"FATAL": logging.FATAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
}
verbosity = valid_levels.get(level)
if verbosity is None:
raise ValueError(
"Please pass a valid level for logging verbosity. "
f"Expected one of: {set(valid_levels.keys())}. "
f"Received: {level}"
)
logging.set_verbosity(verbosity)
def print_msg(message, line_break=True):
"""Print the message to absl logging or stdout."""
if is_interactive_logging_enabled():
if line_break:
sys.stdout.write(message + "\n")
else:
sys.stdout.write(message)
sys.stdout.flush()
else:
logging.info(message)
def ask_to_proceed_with_overwrite(filepath):
"""Produces a prompt asking about overwriting a file.
Args:
filepath: the path to the file to be overwritten.
Returns:
True if we can proceed with overwrite, False otherwise.
"""
overwrite = (
input(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
.strip()
.lower()
)
while overwrite not in ("y", "n"):
overwrite = (
input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
)
if overwrite == "n":
return False
print_msg("[TIP] Next time specify overwrite=True!")
return True
|
"""Language models.
**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides for more
information on how to implement a custom chat model.
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:
https://python.langchain.com/docs/how_to/custom_chat_model/
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models are generally Chat Models, see above).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
LanguageModelLike,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
SimpleChatModel,
)
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM
from langchain_core.language_models.fake_chat_models import (
FakeListChatModel,
FakeMessagesListChatModel,
GenericFakeChatModel,
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = [
"BaseLanguageModel",
"BaseChatModel",
"SimpleChatModel",
"BaseLLM",
"LLM",
"LanguageModelInput",
"get_tokenizer",
"LangSmithParams",
"LanguageModelOutput",
"LanguageModelLike",
"FakeListLLM",
"FakeStreamingListLLM",
"FakeListChatModel",
"FakeMessagesListChatModel",
"GenericFakeChatModel",
"ParrotFakeChatModel",
]
_dynamic_imports = {
"BaseLanguageModel": "base",
"LangSmithParams": "base",
"LanguageModelInput": "base",
"LanguageModelLike": "base",
"LanguageModelOutput": "base",
"get_tokenizer": "base",
"BaseChatModel": "chat_models",
"SimpleChatModel": "chat_models",
"FakeListLLM": "fake",
"FakeStreamingListLLM": "fake",
"FakeListChatModel": "fake_chat_models",
"FakeMessagesListChatModel": "fake_chat_models",
"GenericFakeChatModel": "fake_chat_models",
"ParrotFakeChatModel": "fake_chat_models",
"LLM": "llms",
"BaseLLM": "llms",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
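# Hedged sketch of the "custom LLM" path described in the module docstring; `EchoLLM`
# is a hypothetical toy model that parrots the prompt back, guarded so it does not run
# on import.
if __name__ == "__main__":
    from typing import Any, Optional

    from langchain_core.language_models.llms import LLM

    class EchoLLM(LLM):
        @property
        def _llm_type(self) -> str:
            return "echo"

        def _call(self, prompt: str, stop: Optional[list[str]] = None, **kwargs: Any) -> str:
            return prompt.upper()

    print(EchoLLM().invoke("hello"))  # -> "HELLO"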
|
"""Language models.
**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides for more
information on how to implement a custom chat model.
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:
https://python.langchain.com/docs/how_to/custom_chat_model/
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models are generally Chat Models, see above).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
LanguageModelLike,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
SimpleChatModel,
)
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM
from langchain_core.language_models.fake_chat_models import (
FakeListChatModel,
FakeMessagesListChatModel,
GenericFakeChatModel,
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = [
"BaseLanguageModel",
"BaseChatModel",
"SimpleChatModel",
"BaseLLM",
"LLM",
"LanguageModelInput",
"get_tokenizer",
"LangSmithParams",
"LanguageModelOutput",
"LanguageModelLike",
"FakeListLLM",
"FakeStreamingListLLM",
"FakeListChatModel",
"FakeMessagesListChatModel",
"GenericFakeChatModel",
"ParrotFakeChatModel",
]
_dynamic_imports = {
"BaseLanguageModel": "base",
"LangSmithParams": "base",
"LanguageModelInput": "base",
"LanguageModelLike": "base",
"LanguageModelOutput": "base",
"get_tokenizer": "base",
"BaseChatModel": "chat_models",
"SimpleChatModel": "chat_models",
"FakeListLLM": "fake",
"FakeStreamingListLLM": "fake",
"FakeListChatModel": "fake_chat_models",
"FakeMessagesListChatModel": "fake_chat_models",
"GenericFakeChatModel": "fake_chat_models",
"ParrotFakeChatModel": "fake_chat_models",
"LLM": "llms",
"BaseLLM": "llms",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005))
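# Hedged sketch of how `_delete_=True` interacts with the base config when this file is
# loaded with MMEngine; kept in comments so the config itself stays side-effect free, and
# the config path is hypothetical.
#
#     from mmengine.config import Config
#     cfg = Config.fromfile('configs/regnet/faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py')
#     print(cfg.model.backbone.type)      # 'RegNet' (ResNet keys from the base are dropped)
#     print(cfg.model.neck.in_channels)   # [96, 192, 432, 1008]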
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
img_norm_cfg = dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
|