input | output
---|---
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers import SentenceTransformer
from typing import List, Optional, Tuple, Dict
import numpy as np
import logging
import os
import csv
logger = logging.getLogger(__name__)
class MSEEvaluatorFromDataFrame(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding and some target sentence embedding.
:param dataframe: It must have the following format. Rows contain different, parallel sentences.
Columns are the respective language codes::
[{'en': 'My sentence in English', 'es': 'Oración en español', 'fr': 'Phrase en français'...},
{'en': 'My second sentence', ...}]
:param teacher_model: The teacher model used to compute the reference embeddings for the source sentences.
:param combinations: Must be of the format ``[('en', 'es'), ('en', 'fr'), ...]``.
First entry in a tuple is the source language. The sentence in the respective language will be fetched from
the dataframe and passed to the teacher model. The second entry in the tuple is the target language. The
sentence will be fetched from the dataframe and passed to the student model.
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
:param truncate_dim: The dimension to truncate sentence embeddings to. `None` uses the model's current truncation
dimension. Defaults to None.
"""
def __init__(
self,
dataframe: List[Dict[str, str]],
teacher_model: SentenceTransformer,
combinations: List[Tuple[str, str]],
batch_size: int = 8,
name: str = "",
write_csv: bool = True,
truncate_dim: Optional[int] = None,
):
self.combinations = combinations
self.name = name
self.batch_size = batch_size
if name:
name = "_" + name
self.csv_file = "mse_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps"]
self.write_csv = write_csv
self.truncate_dim = truncate_dim
self.data = {}
logger.info("Compute teacher embeddings")
all_source_sentences = set()
for src_lang, trg_lang in self.combinations:
src_sentences = []
trg_sentences = []
for row in dataframe:
if row[src_lang].strip() != "" and row[trg_lang].strip() != "":
all_source_sentences.add(row[src_lang])
src_sentences.append(row[src_lang])
trg_sentences.append(row[trg_lang])
self.data[(src_lang, trg_lang)] = (src_sentences, trg_sentences)
self.csv_headers.append("{}-{}".format(src_lang, trg_lang))
all_source_sentences = list(all_source_sentences)
with nullcontext() if self.truncate_dim is None else teacher_model.truncate_sentence_embeddings(
self.truncate_dim
):
all_src_embeddings = teacher_model.encode(all_source_sentences, batch_size=self.batch_size)
self.teacher_embeddings = {sent: emb for sent, emb in zip(all_source_sentences, all_src_embeddings)}
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1):
model.eval()
mse_scores = []
for src_lang, trg_lang in self.combinations:
src_sentences, trg_sentences = self.data[(src_lang, trg_lang)]
src_embeddings = np.asarray([self.teacher_embeddings[sent] for sent in src_sentences])
with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
trg_embeddings = np.asarray(model.encode(trg_sentences, batch_size=self.batch_size))
mse = ((src_embeddings - trg_embeddings) ** 2).mean()
mse *= 100
mse_scores.append(mse)
logger.info("MSE evaluation on {} dataset - {}-{}:".format(self.name, src_lang, trg_lang))
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps] + mse_scores)
return -np.mean(mse_scores) # Return negative score as SentenceTransformers maximizes the performance
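# --- A minimal, hedged usage sketch (not part of the evaluator itself). The model names
# --- and the tiny parallel rows below are illustrative assumptions only.
if __name__ == "__main__":
    teacher = SentenceTransformer("paraphrase-distilroberta-base-v2")  # assumed teacher checkpoint
    student = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")  # assumed student checkpoint
    rows = [
        {"en": "My sentence in English", "es": "Oración en español"},
        {"en": "My second sentence", "es": "Mi segunda oración"},
    ]
    evaluator = MSEEvaluatorFromDataFrame(
        dataframe=rows,
        teacher_model=teacher,
        combinations=[("en", "es")],
        batch_size=8,
        name="en-es-dev",
    )
    # The evaluator returns the negative mean MSE (*100), since higher is treated as better.
    print(evaluator(student))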
|
from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers import SentenceTransformer
from typing import List, Tuple, Dict
import numpy as np
import logging
import os
import csv
logger = logging.getLogger(__name__)
class MSEEvaluatorFromDataFrame(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding and some target sentence embedding.
:param dataframe: It must have the following format. Rows contain different, parallel sentences.
Columns are the respective language codes::
[{'en': 'My sentence', 'es': 'Sentence in Spanish', 'fr': 'Sentence in French'...},
{'en': 'My second sentence', ...}]
:param combinations: Must be of the format ``[('en', 'es'), ('en', 'fr'), ...]``.
First entry in a tuple is the source language. The sentence in the respective language will be fetched from
the dataframe and passed to the teacher model. The second entry in the tuple is the target language. The
sentence will be fetched from the dataframe and passed to the student model.
"""
def __init__(
self,
dataframe: List[Dict[str, str]],
teacher_model: SentenceTransformer,
combinations: List[Tuple[str, str]],
batch_size: int = 8,
name="",
write_csv: bool = True,
):
self.combinations = combinations
self.name = name
self.batch_size = batch_size
if name:
name = "_" + name
self.csv_file = "mse_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps"]
self.write_csv = write_csv
self.data = {}
logger.info("Compute teacher embeddings")
all_source_sentences = set()
for src_lang, trg_lang in self.combinations:
src_sentences = []
trg_sentences = []
for row in dataframe:
if row[src_lang].strip() != "" and row[trg_lang].strip() != "":
all_source_sentences.add(row[src_lang])
src_sentences.append(row[src_lang])
trg_sentences.append(row[trg_lang])
self.data[(src_lang, trg_lang)] = (src_sentences, trg_sentences)
self.csv_headers.append("{}-{}".format(src_lang, trg_lang))
all_source_sentences = list(all_source_sentences)
all_src_embeddings = teacher_model.encode(all_source_sentences, batch_size=self.batch_size)
self.teacher_embeddings = {sent: emb for sent, emb in zip(all_source_sentences, all_src_embeddings)}
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1):
model.eval()
mse_scores = []
for src_lang, trg_lang in self.combinations:
src_sentences, trg_sentences = self.data[(src_lang, trg_lang)]
src_embeddings = np.asarray([self.teacher_embeddings[sent] for sent in src_sentences])
trg_embeddings = np.asarray(model.encode(trg_sentences, batch_size=self.batch_size))
mse = ((src_embeddings - trg_embeddings) ** 2).mean()
mse *= 100
mse_scores.append(mse)
logger.info("MSE evaluation on {} dataset - {}-{}:".format(self.name, src_lang, trg_lang))
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps] + mse_scores)
return -np.mean(mse_scores) # Return negative score as SentenceTransformers maximizes the performance
|
from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
ListOfAnyProto,
ListOfDocumentArrayProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
ListOfAnyProto,
ListOfDocumentArrayProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocumentArrayProto',
'DocumentProto',
'NdArrayProto',
'NodeProto',
'DocumentArrayStackedProto',
'ListOfDocumentArrayProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
|
from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
ListOfAnyProto,
ListOfDocumentArrayProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
ListOfAnyProto,
ListOfDocumentArrayProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocumentArrayProto',
'DocumentProto',
'NdArrayProto',
'NodeProto',
'DocumentArrayStackedProto',
'ListOfDocumentArrayProto',
'ListOfAnyProto',
]
|
_base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
type='SingleStageDetector',
backbone=dict(
type='MobileNetV2',
out_indices=(4, 7),
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
neck=dict(
type='SSDNeck',
in_channels=(96, 1280),
out_channels=(96, 1280, 512, 256, 256, 128),
level_strides=(2, 2, 2, 2),
level_paddings=(1, 1, 1, 1),
l2_norm_scale=None,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
bbox_head=dict(
type='SSDHead',
in_channels=(96, 1280, 512, 256, 256, 128),
num_classes=80,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),
# set anchor size manually instead of using the predefined
# SSD300 setting.
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])),
# model training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False),
test_cfg=dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
cudnn_benchmark = True
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(320, 320), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=320),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=320),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
_delete_=True,
type='RepeatDataset', # use RepeatDataset to speed up training
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
min_lr=0)
runner = dict(type='EpochBasedRunner', max_epochs=120)
# Avoid evaluation and saving weights too frequently
evaluation = dict(interval=5, metric='bbox')
checkpoint_config = dict(interval=5)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (24 samples per GPU)
auto_scale_lr = dict(base_batch_size=192)
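# A hedged sketch (comments only, so this config stays untouched when loaded) of how the
# file could be read and tweaked programmatically; the config path below is an assumption.
#
#   from mmcv import Config
#   cfg = Config.fromfile('configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py')
#   cfg.data.samples_per_gpu = 12            # e.g. halve the per-GPU batch size
#   cfg.optimizer.lr = 0.015 * 12 / 24       # scale the LR linearly with the batch size
#   print(cfg.pretty_text)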
|
_base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
type='SingleStageDetector',
backbone=dict(
type='MobileNetV2',
out_indices=(4, 7),
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
neck=dict(
type='SSDNeck',
in_channels=(96, 1280),
out_channels=(96, 1280, 512, 256, 256, 128),
level_strides=(2, 2, 2, 2),
level_paddings=(1, 1, 1, 1),
l2_norm_scale=None,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
bbox_head=dict(
type='SSDHead',
in_channels=(96, 1280, 512, 256, 256, 128),
num_classes=80,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),
# set anchor size manually instead of using the predefined
# SSD300 setting.
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])),
# model training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False),
test_cfg=dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
cudnn_benchmark = True
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(320, 320), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=320),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=320),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
_delete_=True,
type='RepeatDataset', # use RepeatDataset to speed up training
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
min_lr=0)
runner = dict(type='EpochBasedRunner', max_epochs=120)
# Avoid evaluation and saving weights too frequently
evaluation = dict(interval=5, metric='bbox')
checkpoint_config = dict(interval=5)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
|
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.visualization.draw_segmentation_masks")
def draw_segmentation_masks(
images,
segmentation_masks,
num_classes=None,
color_mapping=None,
alpha=0.8,
blend=True,
ignore_index=-1,
data_format=None,
):
"""Draws segmentation masks on images.
The function overlays segmentation masks on the input images.
The masks are blended with the images using the specified alpha value.
Args:
images: A batch of images as a 4D tensor or NumPy array. Shape
should be (batch_size, height, width, channels).
segmentation_masks: A batch of segmentation masks as a 3D or 4D tensor
or NumPy array. Shape should be (batch_size, height, width) or
(batch_size, height, width, 1). The values represent class indices
starting from 1 up to `num_classes`. Class 0 is reserved for
the background and will be ignored if `ignore_index` is not 0.
num_classes: The number of segmentation classes. If `None`, it is
inferred from the maximum value in `segmentation_masks`.
color_mapping: A dictionary mapping class indices to RGB colors.
If `None`, a default color palette is generated. The keys should be
integers starting from 1 up to `num_classes`.
alpha: The opacity of the segmentation masks. Must be in the range
`[0, 1]`.
blend: Whether to blend the masks with the input image using the
`alpha` value. If `False`, the masks are drawn directly on the
images without blending. Defaults to `True`.
ignore_index: The class index to ignore. Mask pixels with this value
will not be drawn. Defaults to -1.
data_format: Image data format, either `"channels_last"` or
`"channels_first"`. Defaults to the `image_data_format` value found
in your Keras config file at `~/.keras/keras.json`. If you never
set it, then it will be `"channels_last"`.
Returns:
A NumPy array of the images with the segmentation masks overlaid.
Raises:
ValueError: If the input `images` is not a 4D tensor or NumPy array.
TypeError: If the input `segmentation_masks` is not an integer type.
"""
data_format = data_format or backend.image_data_format()
images_shape = ops.shape(images)
if len(images_shape) != 4:
raise ValueError(
"`images` must be batched 4D tensor. "
f"Received: images.shape={images_shape}"
)
if data_format == "channels_first":
images = ops.transpose(images, (0, 2, 3, 1))
segmentation_masks = ops.transpose(segmentation_masks, (0, 2, 3, 1))
images = ops.convert_to_tensor(images, dtype="float32")
segmentation_masks = ops.convert_to_tensor(segmentation_masks)
if not backend.is_int_dtype(segmentation_masks.dtype):
dtype = backend.standardize_dtype(segmentation_masks.dtype)
raise TypeError(
"`segmentation_masks` must be in integer dtype. "
f"Received: segmentation_masks.dtype={dtype}"
)
# Infer num_classes
if num_classes is None:
num_classes = int(ops.convert_to_numpy(ops.max(segmentation_masks)))
if color_mapping is None:
colors = _generate_color_palette(num_classes)
else:
colors = [color_mapping[i] for i in range(num_classes)]
valid_masks = ops.not_equal(segmentation_masks, ignore_index)
valid_masks = ops.squeeze(valid_masks, axis=-1)
segmentation_masks = ops.one_hot(segmentation_masks, num_classes)
segmentation_masks = segmentation_masks[..., 0, :]
segmentation_masks = ops.convert_to_numpy(segmentation_masks)
# Replace class with color
masks = segmentation_masks
masks = np.transpose(masks, axes=(3, 0, 1, 2)).astype("bool")
images_to_draw = ops.convert_to_numpy(images).copy()
for mask, color in zip(masks, colors):
color = np.array(color, dtype=images_to_draw.dtype)
images_to_draw[mask, ...] = color[None, :]
images_to_draw = ops.convert_to_tensor(images_to_draw)
outputs = ops.cast(images_to_draw, dtype="float32")
if blend:
outputs = images * (1 - alpha) + outputs * alpha
outputs = ops.where(valid_masks[..., None], outputs, images)
outputs = ops.cast(outputs, dtype="uint8")
outputs = ops.convert_to_numpy(outputs)
return outputs
def _generate_color_palette(num_classes):
palette = np.array([2**25 - 1, 2**15 - 1, 2**21 - 1])
return [((i * palette) % 255).tolist() for i in range(num_classes)]
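# A minimal, hedged usage sketch with random data; shapes, class count and alpha are
# illustrative assumptions, and class 0 is treated as background via `ignore_index=0`.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_images = rng.integers(0, 255, size=(2, 64, 64, 3)).astype("float32")
    demo_masks = rng.integers(0, 3, size=(2, 64, 64, 1)).astype("int32")  # class ids 0..2
    overlaid = draw_segmentation_masks(
        demo_images, demo_masks, num_classes=3, alpha=0.6, blend=True, ignore_index=0
    )
    print(overlaid.shape, overlaid.dtype)  # expected: (2, 64, 64, 3) uint8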
|
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.visualization.draw_segmentation_masks")
def draw_segmentation_masks(
images,
segmentation_masks,
num_classes=None,
color_mapping=None,
alpha=0.8,
blend=True,
ignore_index=-1,
data_format=None,
):
"""Draws segmentation masks on images.
The function overlays segmentation masks on the input images.
The masks are blended with the images using the specified alpha value.
Args:
images: A batch of images as a 4D tensor or NumPy array. Shape
should be (batch_size, height, width, channels).
segmentation_masks: A batch of segmentation masks as a 3D or 4D tensor
or NumPy array. Shape should be (batch_size, height, width) or
(batch_size, height, width, 1). The values represent class indices
starting from 1 up to `num_classes`. Class 0 is reserved for
the background and will be ignored if `ignore_index` is not 0.
num_classes: The number of segmentation classes. If `None`, it is
inferred from the maximum value in `segmentation_masks`.
color_mapping: A dictionary mapping class indices to RGB colors.
If `None`, a default color palette is generated. The keys should be
integers starting from 1 up to `num_classes`.
alpha: The opacity of the segmentation masks. Must be in the range
`[0, 1]`.
blend: Whether to blend the masks with the input image using the
`alpha` value. If `False`, the masks are drawn directly on the
images without blending. Defaults to `True`.
ignore_index: The class index to ignore. Mask pixels with this value
will not be drawn. Defaults to -1.
data_format: Image data format, either `"channels_last"` or
`"channels_first"`. Defaults to the `image_data_format` value found
in your Keras config file at `~/.keras/keras.json`. If you never
set it, then it will be `"channels_last"`.
Returns:
A NumPy array of the images with the segmentation masks overlaid.
Raises:
ValueError: If the input `images` is not a 4D tensor or NumPy array.
TypeError: If the input `segmentation_masks` is not an integer type.
"""
data_format = data_format or backend.image_data_format()
images_shape = ops.shape(images)
if len(images_shape) != 4:
raise ValueError(
"`images` must be batched 4D tensor. "
f"Received: images.shape={images_shape}"
)
if data_format == "channels_first":
images = ops.transpose(images, (0, 2, 3, 1))
segmentation_masks = ops.transpose(segmentation_masks, (0, 2, 3, 1))
images = ops.convert_to_tensor(images, dtype="float32")
segmentation_masks = ops.convert_to_tensor(segmentation_masks)
if not backend.is_int_dtype(segmentation_masks.dtype):
dtype = backend.standardize_dtype(segmentation_masks.dtype)
raise TypeError(
"`segmentation_masks` must be in integer dtype. "
f"Received: segmentation_masks.dtype={dtype}"
)
# Infer num_classes
if num_classes is None:
num_classes = int(ops.convert_to_numpy(ops.max(segmentation_masks)))
if color_mapping is None:
colors = _generate_color_palette(num_classes)
else:
colors = [color_mapping[i] for i in range(num_classes)]
valid_masks = ops.not_equal(segmentation_masks, ignore_index)
valid_masks = ops.squeeze(valid_masks, axis=-1)
segmentation_masks = ops.one_hot(segmentation_masks, num_classes)
segmentation_masks = segmentation_masks[..., 0, :]
segmentation_masks = ops.convert_to_numpy(segmentation_masks)
# Replace class with color
masks = segmentation_masks
masks = np.transpose(masks, axes=(3, 0, 1, 2)).astype("bool")
images_to_draw = ops.convert_to_numpy(images).copy()
for mask, color in zip(masks, colors):
color = np.array(color, dtype=images_to_draw.dtype)
images_to_draw[mask, ...] = color[None, :]
images_to_draw = ops.convert_to_tensor(images_to_draw)
outputs = ops.cast(images_to_draw, dtype="float32")
if blend:
outputs = images * (1 - alpha) + outputs * alpha
outputs = ops.where(valid_masks[..., None], outputs, images)
outputs = ops.cast(outputs, dtype="uint8")
outputs = ops.convert_to_numpy(outputs)
return outputs
def _generate_color_palette(num_classes: int):
palette = np.array([2**25 - 1, 2**15 - 1, 2**21 - 1])
return [((i * palette) % 255).tolist() for i in range(num_classes)]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Defaults to True.
draw_pred (bool): Whether to draw the predicted result.
Defaults to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
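# A hedged sketch of how this registered hook might be enabled from an mmengine-style
# config (comments only; the interval value is an illustrative assumption):
#
#   custom_hooks = [
#       dict(type='NaiveVisualizationHook', interval=1, draw_gt=True, draw_pred=True),
#   ]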
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Defaults to True.
draw_pred (bool): Whether to draw the predicted result.
Defaults to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.writer.add_image(name, origin_image, data_sample,
output, self.draw_gt, self.draw_pred)
|
from jina.clients.base.websocket import WebSocketBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
AsyncProfileMixin,
HealthCheckMixin,
PostMixin,
ProfileMixin,
)
class WebSocketClient(WebSocketBaseClient, PostMixin, ProfileMixin, HealthCheckMixin):
"""A client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='websocket', asyncio=False, host='ws://my.awesome.flow:1234'
) # returns WebSocketClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncWebSocketClient(
WebSocketBaseClient, AsyncPostMixin, AsyncProfileMixin, AsyncHealthCheckMixin
):
"""
Asynchronous client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
Unlike :class:`WebSocketClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
simply calling it will not schedule it for execution.
To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
``asyncio.create_task()``.
:class:`AsyncWebSocketClient` can be very useful in
integration settings, where Jina/Flow/Client is NOT the main logic but rather serves as part of another program.
In this case, users often do not want to let Jina control the ``asyncio.eventloop``. On the contrary, :class:`Client`
controls and wraps the event loop internally, making the Client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='websocket', asyncio=True, host='http://ws.awesome.flow:1234'
) # returns AsyncWebSocketClient instance
async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
from jina.clients.base.websocket import WebSocketBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
HealthCheckMixin,
PostMixin,
)
class WebSocketClient(WebSocketBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='websocket', asyncio=False, host='ws://my.awesome.flow:1234'
) # returns WebSocketClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncWebSocketClient(WebSocketBaseClient, AsyncPostMixin, AsyncHealthCheckMixin):
"""
Asynchronous client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
Unlike :class:`WebSocketClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
simply calling it will not schedule it for execution.
To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
``asyncio.create_task()``.
:class:`AsyncWebSocketClient` can be very useful in
integration settings, where Jina/Flow/Client is NOT the main logic but rather serves as part of another program.
In this case, users often do not want to let Jina control the ``asyncio.eventloop``. On the contrary, :class:`Client`
controls and wraps the event loop internally, making the Client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='websocket', asyncio=True, host='http://ws.awesome.flow:1234'
) # returns AsyncWebSocketClient instance
async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
from typing import List, Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
lst: List[str]
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
lst=["hello", "world"],
),
nested_doc_cls(
text='hello world', image=ImageDoc(), lst=["hello", "world"]
),
]
)
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
'lst',
]
).all()
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList.from_dataframe(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList[nested_doc.__class__].from_dataframe(df=df)
def test_doc_list_error():
class Book(BaseDoc):
title: str
docs = DocList([Book(title='hello'), Book(title='world')])
with pytest.raises(TypeError):
docs.to_dataframe()
@pytest.mark.proto
def test_union_type_error():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_dataframe(docs.to_dataframe())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_dataframe(docs_basic.to_dataframe())
assert docs_copy == docs_basic
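# A small, hedged sketch of the DataFrame round trip that the tests above exercise;
# the Book schema here is an illustrative assumption (not a fixture used by the tests).
def _dataframe_roundtrip_sketch():
    class Book(BaseDoc):
        title: str
        count: Optional[int]

    books = DocList[Book]([Book(title='hello', count=1), Book(title='world', count=2)])
    df = books.to_dataframe()  # columns include 'id', 'title' and 'count'
    restored = DocList[Book].from_dataframe(df)
    assert [doc.title for doc in restored] == ['hello', 'world']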
|
from typing import List, Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
lst: List[str]
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
lst=["hello", "world"],
),
nested_doc_cls(
text='hello world', image=ImageDoc(), lst=["hello", "world"]
),
]
)
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
'lst',
]
).all()
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList.from_dataframe(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList[nested_doc.__class__].from_dataframe(df=df)
def test_doc_list_error():
class Book(BaseDoc):
title: str
docs = DocList([Book(title='hello'), Book(title='world')])
with pytest.raises(TypeError):
docs.to_dataframe()
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import Hook
from torch import distributed as dist
from mmdet.registry import HOOKS
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks.
SyncRandomSizeHook is deprecated; please use the Resize pipeline to achieve
similar functionality, e.g. `dict(type='Resize', img_scale=[(448, 448),
(832, 832)], multiscale_mode='range', keep_ratio=True)`.
Note: Due to the multi-process dataloader, its behavior differs from YOLOX's
official implementation: the official implementation changes the size at a
fixed iteration interval, whereas this hook changes it at a fixed epoch
interval.
Args:
ratio_range (tuple[int]): Random ratio range. It will be multiplied
by 32 and used as the dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The epoch interval of change image size. Default: 1.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=1,
device='cuda'):
warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '
'Please use Resize pipeline to achieve similar '
'functions. Due to the multi-process dataloader, '
'its behavior is different from YOLOX\'s official '
'implementation, the official is to change the size '
'every fixed iteration interval and what we achieved '
'is a fixed epoch interval.')
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_epoch(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.epoch +
1) % self.interval == 0:
# Due to DDP and DP get the device behavior inconsistent,
# so we did not get the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
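# A hedged sketch of the replacement recommended in the docstring above: configure
# multi-scale training directly in the data pipeline instead of using this hook
# (comments only; the surrounding pipeline entries are illustrative assumptions):
#
#   train_pipeline = [
#       dict(type='LoadImageFromFile'),
#       dict(type='LoadAnnotations', with_bbox=True),
#       dict(type='Resize',
#            img_scale=[(448, 448), (832, 832)],
#            multiscale_mode='range',
#            keep_ratio=True),
#       dict(type='RandomFlip', flip_ratio=0.5),
#   ]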
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks.
SyncRandomSizeHook is deprecated; please use the Resize pipeline to achieve
similar functionality, e.g. `dict(type='Resize', img_scale=[(448, 448),
(832, 832)], multiscale_mode='range', keep_ratio=True)`.
Note: Due to the multi-process dataloader, its behavior differs from YOLOX's
official implementation: the official implementation changes the size at a
fixed iteration interval, whereas this hook changes it at a fixed epoch
interval.
Args:
ratio_range (tuple[int]): Random ratio range. It will be multiplied
by 32 and used as the dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The epoch interval of change image size. Default: 1.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=1,
device='cuda'):
warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '
'Please use Resize pipeline to achieve similar '
'functions. Due to the multi-process dataloader, '
'its behavior is different from YOLOX\'s official '
'implementation, the official is to change the size '
'every fixed iteration interval and what we achieved '
'is a fixed epoch interval.')
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_epoch(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.epoch +
1) % self.interval == 0:
# Due to DDP and DP get the device behavior inconsistent,
# so we did not get the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
|
"""Standard LangChain interface tests"""
import os
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIResponses(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"use_responses_api": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@pytest.mark.xfail(reason="Unsupported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
"""Standard LangChain interface tests"""
import os
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
import os
import pytest
from llama_index.graph_stores.memgraph import MemgraphPropertyGraphStore
from llama_index.core.graph_stores.types import (
EntityNode,
Relation,
)
from llama_index.core.schema import TextNode
memgraph_user = os.environ.get("MEMGRAPH_TEST_USER")
memgraph_pass = os.environ.get("MEMGRAPH_TEST_PASS")
memgraph_url = os.environ.get("MEMGRAPH_TEST_URL")
if not memgraph_user or not memgraph_pass or not memgraph_url:
MEMGRAPH_AVAILABLE = False
else:
MEMGRAPH_AVAILABLE = True
@pytest.fixture()
def pg_store() -> MemgraphPropertyGraphStore:
"""Define Memgraph graph store."""
if not MEMGRAPH_AVAILABLE:
pytest.skip("No Memgraph credentials provided")
return MemgraphPropertyGraphStore(
username=memgraph_user, password=memgraph_pass, url=memgraph_url
)
def test_memgraph_pg_store(pg_store: MemgraphPropertyGraphStore) -> None:
"""Test functions for Memgraph graph store."""
# Clear the database
pg_store.structured_query("STORAGE MODE IN_MEMORY_ANALYTICAL")
pg_store.structured_query("DROP GRAPH")
pg_store.structured_query("STORAGE MODE IN_MEMORY_TRANSACTIONAL")
# Test inserting nodes into Memgraph.
entity1 = EntityNode(label="PERSON", name="Logan", properties={"age": 28})
entity2 = EntityNode(label="ORGANIZATION", name="LlamaIndex")
pg_store.upsert_nodes([entity1, entity2])
# Assert the nodes are inserted correctly
kg_nodes = pg_store.get(ids=[entity1.id])
assert kg_nodes[0].name == entity1.name
# Test inserting relations into Memgraph.
relation = Relation(
label="WORKS_FOR",
source_id=entity1.id,
target_id=entity2.id,
properties={"since": 2023},
)
pg_store.upsert_relations([relation])
# Assert the relation is inserted correctly by retrieving the relation map
kg_nodes = pg_store.get(ids=[entity1.id])
pg_store.get_rel_map(kg_nodes, depth=1)
# Test inserting a source text node and 'MENTIONS' relations.
source_node = TextNode(text="Logan (age 28), works for 'LlamaIndex' since 2023.")
relations = [
Relation(label="MENTIONS", target_id=entity1.id, source_id=source_node.node_id),
Relation(label="MENTIONS", target_id=entity2.id, source_id=source_node.node_id),
]
pg_store.upsert_llama_nodes([source_node])
pg_store.upsert_relations(relations)
# Assert the source node and relations are inserted correctly
pg_store.get_llama_nodes([source_node.node_id])
# Test retrieving nodes by properties.
kg_nodes = pg_store.get(properties={"age": 28})
# Test executing a structured query in Memgraph.
query = "MATCH (n:`__Entity__`) RETURN n"
pg_store.structured_query(query)
# Test upserting a new node with additional properties.
new_node = EntityNode(
label="PERSON", name="Logan", properties={"age": 28, "location": "Canada"}
)
pg_store.upsert_nodes([new_node])
# Assert the node has been updated with the new property
kg_nodes = pg_store.get(properties={"age": 28})
# Test deleting nodes from Memgraph.
pg_store.delete(ids=[source_node.node_id])
pg_store.delete(ids=[entity1.id, entity2.id])
# Assert the nodes have been deleted
pg_store.get(ids=[entity1.id, entity2.id])
|
import os
import pytest
from llama_index.graph_stores.memgraph import MemgraphPropertyGraphStore
from llama_index.core.graph_stores.types import (
EntityNode,
Relation,
)
from llama_index.core.schema import TextNode
memgraph_user = os.environ.get("MEMGRAPH_TEST_USER")
memgraph_pass = os.environ.get("MEMGRAPH_TEST_PASS")
memgraph_url = os.environ.get("MEMGRAPH_TEST_URL")
if not memgraph_user or not memgraph_pass or not memgraph_url:
memgraph_available = False
else:
memgraph_available = True
@pytest.fixture()
def pg_store() -> MemgraphPropertyGraphStore:
if not memgraph_available:
pytest.skip("No Memgraph credentials provided")
return MemgraphPropertyGraphStore(
username=memgraph_user, password=memgraph_pass, url=memgraph_url
)
def test_memgraph_pg_store(pg_store: MemgraphPropertyGraphStore) -> None:
# Clear the database
pg_store.structured_query("STORAGE MODE IN_MEMORY_ANALYTICAL")
pg_store.structured_query("DROP GRAPH")
pg_store.structured_query("STORAGE MODE IN_MEMORY_TRANSACTIONAL")
# Test upsert nodes
entity1 = EntityNode(label="PERSON", name="Logan", properties={"age": 28})
entity2 = EntityNode(label="ORGANIZATION", name="LlamaIndex")
pg_store.upsert_nodes([entity1, entity2])
# Assert the nodes are inserted correctly
kg_nodes = pg_store.get(ids=[entity1.id])
# Test inserting relations into Memgraph.
relation = Relation(
label="WORKS_FOR",
source_id=entity1.id,
target_id=entity2.id,
properties={"since": 2023},
)
pg_store.upsert_relations([relation])
# Assert the relation is inserted correctly by retrieving the relation map
kg_nodes = pg_store.get(ids=[entity1.id])
paths = pg_store.get_rel_map(kg_nodes, depth=1)
# Test inserting a source text node and 'MENTIONS' relations.
source_node = TextNode(text="Logan (age 28), works for LlamaIndex since 2023.")
relations = [
Relation(label="MENTIONS", target_id=entity1.id, source_id=source_node.node_id),
Relation(label="MENTIONS", target_id=entity2.id, source_id=source_node.node_id),
]
pg_store.upsert_llama_nodes([source_node])
pg_store.upsert_relations(relations)
# Assert the source node and relations are inserted correctly
llama_nodes = pg_store.get_llama_nodes([source_node.node_id])
# Test retrieving nodes by properties.
kg_nodes = pg_store.get(properties={"age": 28})
# Test executing a structured query in Memgraph.
query = "MATCH (n:`__Entity__`) RETURN n"
result = pg_store.structured_query(query)
# Test upserting a new node with additional properties.
new_node = EntityNode(
label="PERSON", name="Logan", properties={"age": 28, "location": "Canada"}
)
pg_store.upsert_nodes([new_node])
# Assert the node has been updated with the new property
kg_nodes = pg_store.get(properties={"age": 28})
# Test deleting nodes from Memgraph.
pg_store.delete(ids=[source_node.node_id])
pg_store.delete(ids=[entity1.id, entity2.id])
# Assert the nodes have been deleted
nodes = pg_store.get(ids=[entity1.id, entity2.id])
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
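# A minimal, hedged sketch of scoring new sentence pairs with the trained CrossEncoder;
# the example pairs below are illustrative.
example_pairs = [
    ["A man is eating food.", "A man is eating a piece of bread."],
    ["A man is eating food.", "The girl is carrying a baby."],
]
scores = model.predict(example_pairs)  # one similarity score per pair, roughly in [0, 1]
print(scores)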
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
from sentence_transformers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
#Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = 'output/training_stsbenchmark-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder('distilroberta-base', num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
if row['split'] == 'dev':
dev_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
elif row['split'] == 'test':
test_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
else:
#As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
train_samples.append(InputExample(texts=[row['sentence2'], row['sentence1']], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name='sts-dev')
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) #10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name='sts-test')
evaluator(model)
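# A minimal usage sketch (not part of the original script): once training has finished,
# the saved cross-encoder can score new sentence pairs directly via predict(). The model
# path below is hypothetical; use whatever model_save_path was at training time.
trained_model = CrossEncoder("output/training_stsbenchmark-2024-01-01_12-00-00")
scores = trained_model.predict(
    [
        ("A man is eating food.", "A man is eating a meal."),
        ("A man is eating food.", "The girl is carrying a baby."),
    ]
)
print(scores)  # one similarity score in the range 0...1 per sentence pair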
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ..exceptions import DataConversionWarning
from . import metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa: F401
resample,
shuffle,
)
from ._mask import safe_mask
from ._repr_html.base import _HTMLDocumentationLinkMixin # noqa: F401
from ._repr_html.estimator import estimator_html_repr
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
__all__ = [
"Bunch",
"ClassifierTags",
"DataConversionWarning",
"InputTags",
"RegressorTags",
"Tags",
"TargetTags",
"TransformerTags",
"all_estimators",
"as_float_array",
"assert_all_finite",
"check_X_y",
"check_array",
"check_consistent_length",
"check_random_state",
"check_scalar",
"check_symmetric",
"column_or_1d",
"compute_class_weight",
"compute_sample_weight",
"deprecated",
"estimator_html_repr",
"gen_batches",
"gen_even_slices",
"get_tags",
"indexable",
"metadata_routing",
"murmurhash3_32",
"resample",
"safe_mask",
"safe_sqr",
"shuffle",
]
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ..exceptions import DataConversionWarning
from . import metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa: F401
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
__all__ = [
"Bunch",
"ClassifierTags",
"DataConversionWarning",
"InputTags",
"RegressorTags",
"Tags",
"TargetTags",
"TransformerTags",
"all_estimators",
"as_float_array",
"assert_all_finite",
"check_X_y",
"check_array",
"check_consistent_length",
"check_random_state",
"check_scalar",
"check_symmetric",
"column_or_1d",
"compute_class_weight",
"compute_sample_weight",
"deprecated",
"estimator_html_repr",
"gen_batches",
"gen_even_slices",
"get_tags",
"indexable",
"metadata_routing",
"murmurhash3_32",
"resample",
"safe_mask",
"safe_sqr",
"shuffle",
]
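# A small illustrative sketch (not part of scikit-learn itself): the semi-private
# _safe_indexing helper mentioned in the comment above indexes rows uniformly across
# numpy arrays, sparse matrices and pandas DataFrames, and gen_batches yields slice
# objects that cover range(n) in fixed-size chunks.
import numpy as np

from sklearn.utils import _safe_indexing, gen_batches

X = np.arange(12).reshape(6, 2)
print(_safe_indexing(X, [0, 2, 4]))        # rows 0, 2 and 4
print(list(gen_batches(6, batch_size=4)))  # [slice(0, 4, None), slice(4, 6, None)]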
|
"""
===========================================================================
Normal, Ledoit-Wolf and OAS Linear Discriminant Analysis for classification
===========================================================================
This example illustrates how the Ledoit-Wolf and Oracle Approximating
Shrinkage (OAS) estimators of covariance can improve classification.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.covariance import OAS
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2, acc_clf3 = [], [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2, score_clf3 = 0, 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=None).fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver="lsqr", shrinkage="auto").fit(X, y)
oa = OAS(store_precision=False, assume_centered=False)
clf3 = LinearDiscriminantAnalysis(solver="lsqr", covariance_estimator=oa).fit(
X, y
)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
score_clf3 += clf3.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
acc_clf3.append(score_clf3 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(
features_samples_ratio,
acc_clf1,
linewidth=2,
label="LDA",
color="gold",
linestyle="solid",
)
plt.plot(
features_samples_ratio,
acc_clf2,
linewidth=2,
label="LDA with Ledoit Wolf",
color="navy",
linestyle="dashed",
)
plt.plot(
features_samples_ratio,
acc_clf3,
linewidth=2,
label="LDA with OAS",
color="red",
linestyle="dotted",
)
plt.xlabel("n_features / n_samples")
plt.ylabel("Classification accuracy")
plt.legend(loc="lower left")
plt.ylim((0.65, 1.0))
plt.suptitle(
"LDA (Linear Discriminant Analysis) vs."
"\n"
"LDA with Ledoit Wolf vs."
"\n"
"LDA with OAS (1 discriminative feature)"
)
plt.show()
|
"""
===========================================================================
Normal, Ledoit-Wolf and OAS Linear Discriminant Analysis for classification
===========================================================================
This example illustrates how the Ledoit-Wolf and Oracle Approximating
Shrinkage (OAS) estimators of covariance can improve classification.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.covariance import OAS
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2, acc_clf3 = [], [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2, score_clf3 = 0, 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=None).fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver="lsqr", shrinkage="auto").fit(X, y)
oa = OAS(store_precision=False, assume_centered=False)
clf3 = LinearDiscriminantAnalysis(solver="lsqr", covariance_estimator=oa).fit(
X, y
)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
score_clf3 += clf3.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
acc_clf3.append(score_clf3 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(
features_samples_ratio,
acc_clf1,
linewidth=2,
label="LDA",
color="gold",
linestyle="solid",
)
plt.plot(
features_samples_ratio,
acc_clf2,
linewidth=2,
label="LDA with Ledoit Wolf",
color="navy",
linestyle="dashed",
)
plt.plot(
features_samples_ratio,
acc_clf3,
linewidth=2,
label="LDA with OAS",
color="red",
linestyle="dotted",
)
plt.xlabel("n_features / n_samples")
plt.ylabel("Classification accuracy")
plt.legend(loc="lower left")
plt.ylim((0.65, 1.0))
plt.suptitle(
"LDA (Linear Discriminant Analysis) vs. "
+ "\n"
+ "LDA with Ledoit Wolf vs. "
+ "\n"
+ "LDA with OAS (1 discriminative feature)"
)
plt.show()
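# A brief supplementary sketch (illustration only, not part of the original example):
# both shrinkage estimators expose the shrinkage coefficient they selected, which is
# what the shrinkage="auto" and covariance_estimator options above feed into LDA.
import numpy as np

from sklearn.covariance import OAS, LedoitWolf

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(20, 40))  # few samples, many features
print("Ledoit-Wolf shrinkage:", LedoitWolf().fit(X_demo).shrinkage_)
print("OAS shrinkage:", OAS().fit(X_demo).shrinkage_)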
|
"""Unit tests for ScrapegraphAI tool specification."""
from unittest.mock import Mock, patch
import pytest
from pydantic import BaseModel
from llama_index.tools.scrapegraph import ScrapegraphToolSpec
class TestSchema(BaseModel):
"""Test schema for scraping operations."""
title: str
description: str
@pytest.fixture()
def tool_spec():
"""Create a ScrapegraphToolSpec instance for testing."""
return ScrapegraphToolSpec()
@pytest.fixture()
def mock_sync_client():
"""Create a mock SyncClient."""
with patch("llama_index.tools.scrapegraph.base.Client") as mock:
client_instance = Mock()
mock.return_value = client_instance
yield client_instance
def test_sync_scraping(tool_spec: ScrapegraphToolSpec, mock_sync_client: Mock):
"""Test synchronous scraping functionality."""
# Test data
prompt = "Extract product information"
url = "https://example.com"
api_key = "sgai-0000-0000-0000-0000-0000-0000-0000-0000"
schema = [TestSchema]
expected_response = [{"title": "Test Product", "description": "Test Description"}]
# Configure mock
mock_sync_client.smartscraper.return_value = expected_response
# Execute test
response = tool_spec.scrapegraph_smartscraper(
prompt=prompt, url=url, api_key=api_key, schema=schema
)
# Verify
mock_sync_client.smartscraper.assert_called_once_with(
website_url=url, user_prompt=prompt, output_schema=schema
)
assert response == expected_response
def test_search(tool_spec: ScrapegraphToolSpec, mock_sync_client: Mock):
"""Test search functionality."""
# Test data
query = "test search query"
api_key = "sgai-0000-0000-0000-0000-0000-0000-0000-0000"
expected_response = "Search results"
# Configure mock
mock_sync_client.search.return_value = expected_response
# Execute test
response = tool_spec.scrapegraph_search(query=query, api_key=api_key)
# Verify
mock_sync_client.search.assert_called_once_with(query=query)
assert response == expected_response
|
"""Unit tests for ScrapegraphAI tool specification."""
from unittest.mock import Mock, patch
import pytest
from pydantic import BaseModel
from llama_index.tools.scrapegraph import ScrapegraphToolSpec
class TestSchema(BaseModel):
"""Test schema for scraping operations."""
title: str
description: str
@pytest.fixture()
def tool_spec():
"""Create a ScrapegraphToolSpec instance for testing."""
return ScrapegraphToolSpec()
@pytest.fixture()
def mock_sync_client():
"""Create a mock SyncClient."""
with patch("llama_index.tools.scrapegraph.base.Client") as mock:
client_instance = Mock()
mock.return_value = client_instance
yield client_instance
def test_sync_scraping(tool_spec: ScrapegraphToolSpec, mock_sync_client: Mock):
"""Test synchronous scraping functionality."""
# Test data
prompt = "Extract product information"
url = "https://example.com"
api_key = "sgai-0000-0000-0000-0000-0000-0000-0000-0000"
schema = [TestSchema]
expected_response = [{"title": "Test Product", "description": "Test Description"}]
# Configure mock
mock_sync_client.smartscraper.return_value = expected_response
# Execute test
response = tool_spec.scrapegraph_smartscraper(
prompt=prompt, url=url, api_key=api_key, schema=schema
)
# Verify
mock_sync_client.smartscraper.assert_called_once_with(
website_url=url, user_prompt=prompt, output_schema=schema
)
assert response == expected_response
|
"""Standard LangChain interface tests"""
from langchain_core.embeddings import Embeddings
from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests
from langchain_openai import OpenAIEmbeddings
class TestOpenAIStandard(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return OpenAIEmbeddings
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"OPENAI_API_KEY": "api_key",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_BASE": "api_base",
"OPENAI_PROXY": "https://proxy.com",
},
{},
{
"openai_api_key": "api_key",
"openai_organization": "org_id",
"openai_api_base": "api_base",
"openai_proxy": "https://proxy.com",
},
)
|
"""Standard LangChain interface tests"""
from typing import Tuple, Type
from langchain_core.embeddings import Embeddings
from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests
from langchain_openai import OpenAIEmbeddings
class TestOpenAIStandard(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[Embeddings]:
return OpenAIEmbeddings
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"OPENAI_API_KEY": "api_key",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_BASE": "api_base",
"OPENAI_PROXY": "https://proxy.com",
},
{},
{
"openai_api_key": "api_key",
"openai_organization": "org_id",
"openai_api_base": "api_base",
"openai_proxy": "https://proxy.com",
},
)
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user(alt_user: bool = False) -> User:
if alt_user:
test_user_data = {
"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
"email": "[email protected]",
"name": "Test User 2",
}
else:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "[email protected]",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user() -> User:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser#example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
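# A tiny worked sketch (hypothetical, standalone) of what the FillTextTemplateBlock node
# above is configured to produce: with a="Hello", b="World", c="!!!" the format string
# "{{a}}, {{b}}{{c}}" expands to "Hello, World!!!", which is the text that should reach
# the PrintToConsoleBlock.
template = "{{a}}, {{b}}{{c}}"
values = {"a": "Hello", "b": "World", "c": "!!!"}
expanded = template
for key, value in values.items():
    expanded = expanded.replace("{{" + key + "}}", value)
print(expanded)  # Hello, World!!!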
|
import random
import time
from typing import List
from llama_index.schema import TextNode
from llama_index.vector_stores.simple import SimpleVectorStore
from llama_index.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
)
def generate_nodes(
num_vectors: int = 100, embedding_length: int = 1536
) -> List[TextNode]:
random.seed(42) # Make this reproducible
return [
TextNode(
embedding=[random.uniform(0, 1) for _ in range(embedding_length)],
)
for _ in range(num_vectors)
]
def bench_simple_vector_store(
num_vectors: List[int] = [10, 50, 100, 500, 1000],
) -> None:
"""Benchmark simple vector store."""
print("Benchmarking SimpleVectorStore\n---------------------------")
for num_vector in num_vectors:
nodes = generate_nodes(num_vectors=num_vector)
vector_store = SimpleVectorStore()
time1 = time.time()
vector_store.add(nodes=nodes)
time2 = time.time()
print(f"Adding {num_vector} vectors took {time2 - time1} seconds")
for mode in [
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.SVM,
VectorStoreQueryMode.MMR,
]:
time1 = time.time()
query = VectorStoreQuery(
query_embedding=nodes[0].get_embedding(), similarity_top_k=10, mode=mode
)
vector_store.query(query=query)
time2 = time.time()
print(
f"""Querying store of {num_vector} \
vectors with {mode} mode took {time2 - time1} seconds"""
)
if __name__ == "__main__":
bench_simple_vector_store()
|
import random
import time
from typing import List
from llama_index.schema import TextNode
from llama_index.vector_stores.simple import SimpleVectorStore
from llama_index.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
)
def generate_nodes(
num_vectors: int = 100, embedding_length: int = 1536
) -> List[TextNode]:
random.seed(42) # Make this reproducible
return [
TextNode(
embedding=[random.uniform(0, 1) for _ in range(embedding_length)],
)
for _ in range(num_vectors)
]
def bench_simple_vector_store(
num_vectors: List[int] = [10, 50, 100, 500, 1000]
) -> None:
"""Benchmark simple vector store."""
print("Benchmarking SimpleVectorStore\n---------------------------")
for num_vector in num_vectors:
nodes = generate_nodes(num_vectors=num_vector)
vector_store = SimpleVectorStore()
time1 = time.time()
vector_store.add(nodes=nodes)
time2 = time.time()
print(f"Adding {num_vector} vectors took {time2 - time1} seconds")
for mode in [
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.SVM,
VectorStoreQueryMode.MMR,
]:
time1 = time.time()
query = VectorStoreQuery(
query_embedding=nodes[0].get_embedding(), similarity_top_k=10, mode=mode
)
vector_store.query(query=query)
time2 = time.time()
print(
f"""Querying store of {num_vector} \
vectors with {mode} mode took {time2 - time1} seconds"""
)
if __name__ == "__main__":
bench_simple_vector_store()
|
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
from ._mask import Mask
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
from ._mask import Mask
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
from ._dataset_wrapper import wrap_dataset_for_transforms_v2 # type: ignore[attr-defined] # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
from llama_index.llms.deepseek.utils import get_context_window, FUNCTION_CALLING_MODELS
class DeepSeek(OpenAILike):
"""
DeepSeek LLM.
Examples:
`pip install llama-index-llms-deepseek`
```python
from llama_index.llms.deepseek import DeepSeek
# Set up the DeepSeek class with the required model and API key
llm = DeepSeek(model="deepseek-chat", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Explain the importance of low latency LLMs")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.deepseek.com",
**openai_llm_kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("DEEPSEEK_API_KEY", None)
context_window = openai_llm_kwargs.pop(
"context_window", get_context_window(model)
)
super().__init__(
model=model,
api_key=api_key,
            api_base=api_base,
            context_window=context_window,
is_chat_model=openai_llm_kwargs.pop("is_chat_model", True),
is_function_calling_model=openai_llm_kwargs.pop(
"is_function_calling_model", model in FUNCTION_CALLING_MODELS
),
**openai_llm_kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "DeepSeek"
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
from llama_index.llms.deepseek.utils import get_context_window, FUNCTION_CALLING_MODELS
class DeepSeek(OpenAILike):
"""
DeepSeek LLM.
Examples:
`pip install llama-index-llms-deepseek`
```python
from llama_index.llms.deepseek import DeepSeek
# Set up the DeepSeek class with the required model and API key
llm = DeepSeek(model="deepseek-chat", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Explain the importance of low latency LLMs")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.deepseek.com",
**openai_llm_kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("DEEPSEEK_API_KEY", None)
context_window = openai_llm_kwargs.pop(
"context_window", get_context_window(model)
)
super().__init__(
model=model,
api_key=api_key,
            api_base=api_base,
            context_window=context_window,
is_chat_model=openai_llm_kwargs.pop("is_chat_model", True),
is_function_calling_model=openai_llm_kwargs.pop(
"is_function_calling_model", model in FUNCTION_CALLING_MODELS
),
**openai_llm_kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "DeepSeek"
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
truncate_dim=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
evaluation.SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
truncate_dim=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments, losses
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
truncate_dim=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
truncate_dim=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
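# A hedged usage sketch (not in the original script): after training, the saved
# SparseEncoder can be reloaded from output_dir and used like a regular
# SentenceTransformer; the directory below assumes the output_dir defined above.
from sentence_transformers import SparseEncoder

loaded = SparseEncoder(
    "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
)
embeddings = loaded.encode(["A sparse embedding example.", "Another sentence."])
print(embeddings.shape)  # dense shape; each row has at most k=256 non-zero entries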
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
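# A quick arithmetic check of the comments above (standalone sketch, not part of the
# config): the lr step milestones are the stated fractions of max_iters, and 90k
# iterations at batch size 64 over COCO train2017 (~118k images, an external figure)
# is roughly 48 epochs.
max_iters = 90000
print([int(max_iters * frac) for frac in (0.9, 0.95, 0.975)])  # [81000, 85500, 87750]
print(round(max_iters * 64 / 118287, 1))  # ~48.7 epochs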
|
import pathlib
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""Stanford Cars Dataset
The Cars dataset contains 16,185 images of 196 classes of cars. The data is
split into 8,144 training images and 8,041 testing images, where each class
has been split roughly in a 50-50 split
The original URL is https://ai.stanford.edu/~jkrause/cars/car_dataset.html, but it is broken.
.. note::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (string): Root directory of dataset
split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): This parameter exists for backward compatibility but it does not
download the dataset, since the original URL is not available anymore. The dataset
seems to be available on Kaggle so you can try to manually download it using
`these instructions <https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616>`_.
"""
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
try:
import scipy.io as sio
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "stanford_cars"
devkit = self._base_folder / "devkit"
if self._split == "train":
self._annotations_mat_path = devkit / "cars_train_annos.mat"
self._images_base_path = self._base_folder / "cars_train"
else:
self._annotations_mat_path = self._base_folder / "cars_test_annos_withlabels.mat"
self._images_base_path = self._base_folder / "cars_test"
if download:
raise ValueError(
"The original URL is broken so the StanfordCars dataset is not available for automatic "
"download anymore. You can try to download it manually following "
"https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616, "
"and set download=False to avoid this error."
)
if not self._check_exists():
raise RuntimeError(
"Dataset not found. Try to manually download following the instructions in "
"https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616."
)
self._samples = [
(
str(self._images_base_path / annotation["fname"]),
annotation["class"] - 1, # Original target mapping starts from 1, hence -1
)
for annotation in sio.loadmat(self._annotations_mat_path, squeeze_me=True)["annotations"]
]
self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)["class_names"].tolist()
self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
"""Returns pil_image and class_id for given index"""
image_path, target = self._samples[idx]
pil_image = Image.open(image_path).convert("RGB")
if self.transform is not None:
pil_image = self.transform(pil_image)
if self.target_transform is not None:
target = self.target_transform(target)
return pil_image, target
def _check_exists(self) -> bool:
if not (self._base_folder / "devkit").is_dir():
return False
return self._annotations_mat_path.exists() and self._images_base_path.is_dir()
|
import pathlib
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""`Stanford Cars <https://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_ Dataset
The Cars dataset contains 16,185 images of 196 classes of cars. The data is
split into 8,144 training images and 8,041 testing images, where each class
has been split roughly in a 50-50 split
.. note::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (string): Root directory of dataset
split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again."""
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
try:
import scipy.io as sio
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "stanford_cars"
devkit = self._base_folder / "devkit"
if self._split == "train":
self._annotations_mat_path = devkit / "cars_train_annos.mat"
self._images_base_path = self._base_folder / "cars_train"
else:
self._annotations_mat_path = self._base_folder / "cars_test_annos_withlabels.mat"
self._images_base_path = self._base_folder / "cars_test"
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = [
(
str(self._images_base_path / annotation["fname"]),
annotation["class"] - 1, # Original target mapping starts from 1, hence -1
)
for annotation in sio.loadmat(self._annotations_mat_path, squeeze_me=True)["annotations"]
]
self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)["class_names"].tolist()
self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
"""Returns pil_image and class_id for given index"""
image_path, target = self._samples[idx]
pil_image = Image.open(image_path).convert("RGB")
if self.transform is not None:
pil_image = self.transform(pil_image)
if self.target_transform is not None:
target = self.target_transform(target)
return pil_image, target
def download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz",
download_root=str(self._base_folder),
md5="c3b158d763b6e2245038c8ad08e45376",
)
if self._split == "train":
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/car196/cars_train.tgz",
download_root=str(self._base_folder),
md5="065e5b463ae28d29e77c1b4b166cfe61",
)
else:
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/car196/cars_test.tgz",
download_root=str(self._base_folder),
md5="4ce7ebf6a94d07f1952d94dd34c4d501",
)
download_url(
url="https://ai.stanford.edu/~jkrause/car196/cars_test_annos_withlabels.mat",
root=str(self._base_folder),
md5="b0a2b23655a3edd16d84508592a98d10",
)
def _check_exists(self) -> bool:
if not (self._base_folder / "devkit").is_dir():
return False
return self._annotations_mat_path.exists() and self._images_base_path.is_dir()
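# A hedged usage sketch (not part of the dataset class): assuming the archives have been
# downloaded or manually unpacked under <root>/stanford_cars as the class expects, the
# dataset behaves like any other torchvision VisionDataset. The root path below is
# hypothetical.
from torchvision.datasets import StanfordCars as _StanfordCars

cars_train = _StanfordCars(root="./data", split="train")
image, class_id = cars_train[0]
print(len(cars_train), cars_train.classes[class_id])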
|
_base_ = './ga-rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
import torch
import torchaudio.prototype.functional as F
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@parameterized.expand(
[
(F.convolve,),
(F.fftconvolve,),
]
)
def test_convolve(self, fn):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y)))
self.assertTrue(gradgradcheck(fn, (x, y)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
"""Function Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
"""Message for passing the result of executing a tool back to a model.
FunctionMessage are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
name: str
"""The name of the function that was executed."""
type: Literal["function"] = "function"
"""The type of the message (used for serialization). Defaults to "function"."""
FunctionMessage.model_rebuild()
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
"""Function Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "FunctionMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
msg = "Cannot concatenate FunctionMessageChunks with different names."
raise ValueError(msg)
return self.__class__(
name=self.name,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
|
"""Function Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
"""Message for passing the result of executing a tool back to a model.
FunctionMessage are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
name: str
"""The name of the function that was executed."""
type: Literal["function"] = "function"
"""The type of the message (used for serialization). Defaults to "function"."""
FunctionMessage.model_rebuild()
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
"""Function Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "FunctionMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
msg = "Cannot concatenate FunctionMessageChunks with different names."
raise ValueError(msg)
return self.__class__(
name=self.name,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
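# A minimal sketch of the chunk-merging behaviour defined above (illustration only):
# adding two FunctionMessageChunk instances with the same name concatenates their
# content and merges their metadata dictionaries.
left = FunctionMessageChunk(name="get_weather", content="It is ")
right = FunctionMessageChunk(name="get_weather", content="sunny.")
merged = left + right
print(merged.content)  # It is sunny.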
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from numpy import ndarray
from torch import Tensor
from mmdet.core.bbox.assigners import AssignResult
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num: int,
pos_fraction: float,
neg_pos_ub: int = -1,
add_gt_as_proposals: bool = True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery: Union[Tensor, ndarray, list],
num: int) -> Union[Tensor, ndarray]:
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some positive samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some negative samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
            num_expected (int): The number of expected negative samples
Returns:
Tensor or ndarray: sampled indices.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool, optional): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery, num):
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
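# A standalone sketch of the core of random_choice above (illustration only): draw
# `num` distinct indices from a gallery tensor via a random permutation, which is what
# the sampler does for the positive/negative candidate indices.
import torch

gallery = torch.arange(100, 110)
num = 3
perm = torch.randperm(gallery.numel())[:num]
print(gallery[perm])  # three distinct elements of the gallery, in random order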
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field, SecretStr
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearch(BaseTool):
"""Tool that queries the BraveSearch.
Api key can be provided as an environment variable BRAVE_SEARCH_API_KEY
or as a parameter.
Example usages:
.. code-block:: python
# uses BRAVE_SEARCH_API_KEY from environment
tool = BraveSearch()
.. code-block:: python
# uses the provided api key
tool = BraveSearch.from_api_key("your-api-key")
.. code-block:: python
# uses the provided api key and search kwargs
tool = BraveSearch.from_api_key(
api_key = "your-api-key",
search_kwargs={"max_results": 5}
)
.. code-block:: python
# uses BRAVE_SEARCH_API_KEY from environment
tool = BraveSearch.from_search_kwargs({"max_results": 5})
"""
name: str = "brave_search"
description: str = (
"a search engine. "
"useful for when you need to answer questions about current events."
" input should be a search query."
)
search_wrapper: BraveSearchWrapper = Field(default_factory=BraveSearchWrapper)
@classmethod
def from_api_key(
cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
) -> BraveSearch:
"""Create a tool from an api key.
Args:
api_key: The api key to use.
search_kwargs: Any additional kwargs to pass to the search wrapper.
**kwargs: Any additional kwargs to pass to the tool.
Returns:
A tool.
"""
wrapper = BraveSearchWrapper(
api_key=SecretStr(api_key), search_kwargs=search_kwargs or {}
)
return cls(search_wrapper=wrapper, **kwargs)
@classmethod
def from_search_kwargs(cls, search_kwargs: dict, **kwargs: Any) -> BraveSearch:
"""Create a tool from search kwargs.
Uses the environment variable BRAVE_SEARCH_API_KEY for api key.
Args:
search_kwargs: Any additional kwargs to pass to the search wrapper.
**kwargs: Any additional kwargs to pass to the tool.
Returns:
A tool.
"""
# we can not provide api key because it's calculated in the wrapper,
# so the ignore is needed for linter
# not ideal but needed to keep the tool code changes non-breaking
wrapper = BraveSearchWrapper(search_kwargs=search_kwargs)
return cls(search_wrapper=wrapper, **kwargs)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.search_wrapper.run(query)
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field, SecretStr
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearch(BaseTool): # type: ignore[override]
"""Tool that queries the BraveSearch.
Api key can be provided as an environment variable BRAVE_SEARCH_API_KEY
or as a parameter.
Example usages:
.. code-block:: python
# uses BRAVE_SEARCH_API_KEY from environment
tool = BraveSearch()
.. code-block:: python
# uses the provided api key
tool = BraveSearch.from_api_key("your-api-key")
.. code-block:: python
# uses the provided api key and search kwargs
tool = BraveSearch.from_api_key(
api_key = "your-api-key",
search_kwargs={"max_results": 5}
)
.. code-block:: python
# uses BRAVE_SEARCH_API_KEY from environment
tool = BraveSearch.from_search_kwargs({"max_results": 5})
"""
name: str = "brave_search"
description: str = (
"a search engine. "
"useful for when you need to answer questions about current events."
" input should be a search query."
)
search_wrapper: BraveSearchWrapper = Field(default_factory=BraveSearchWrapper)
@classmethod
def from_api_key(
cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
) -> BraveSearch:
"""Create a tool from an api key.
Args:
api_key: The api key to use.
search_kwargs: Any additional kwargs to pass to the search wrapper.
**kwargs: Any additional kwargs to pass to the tool.
Returns:
A tool.
"""
wrapper = BraveSearchWrapper(
api_key=SecretStr(api_key), search_kwargs=search_kwargs or {}
)
return cls(search_wrapper=wrapper, **kwargs)
@classmethod
def from_search_kwargs(cls, search_kwargs: dict, **kwargs: Any) -> BraveSearch:
"""Create a tool from search kwargs.
Uses the environment variable BRAVE_SEARCH_API_KEY for api key.
Args:
search_kwargs: Any additional kwargs to pass to the search wrapper.
**kwargs: Any additional kwargs to pass to the tool.
Returns:
A tool.
"""
# we can not provide api key because it's calculated in the wrapper,
# so the ignore is needed for linter
# not ideal but needed to keep the tool code changes non-breaking
wrapper = BraveSearchWrapper(search_kwargs=search_kwargs)
return cls(search_wrapper=wrapper, **kwargs)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.search_wrapper.run(query)
|
_base_ = ['./yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py']
# yapf:disable
model = dict(
bbox_head=dict(
anchor_generator=dict(
base_sizes=[[(220, 125), (128, 222), (264, 266)],
[(35, 87), (102, 96), (60, 170)],
[(10, 15), (24, 36), (72, 42)]])))
# yapf:enable
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as those in `preprocess_cfg`
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = ['./yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py']
# yapf:disable
model = dict(
bbox_head=dict(
anchor_generator=dict(
base_sizes=[[(220, 125), (128, 222), (264, 266)],
[(35, 87), (102, 96), (60, 170)],
[(10, 15), (24, 36), (72, 42)]])))
# yapf:enable
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as those in `preprocess_cfg`
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset
with the softmax loss function. Every 100 training steps, the model is evaluated on the
STS benchmark dataset.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
train_batch_size = 16
output_dir = "output/training_nli_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss: https://sbert.net/docs/package_reference/sentence_transformer/losses.html#softmaxloss
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
logging.info("Evaluation before training:")
dev_evaluator(model)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-nli-v1")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-nli-v1')`."
)
|
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset
with the softmax loss function. Every 100 training steps, the model is evaluated on the
STS benchmark dataset.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
train_batch_size = 16
output_dir = "output/training_nli_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss: https://sbert.net/docs/package_reference/sentence_transformer/losses.html#softmaxloss
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
logging.info("Evaluation before training:")
dev_evaluator(model)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-nli-v1")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-nli-v1')`."
)
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import softmax
class SoftmaxTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_softmax(self):
self.run_layer_test(
softmax.Softmax,
init_kwargs={},
input_shape=(2, 3, 4),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_softmax_correctness(self):
softmax_layer = softmax.Softmax()
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_mask(self):
softmax_layer = softmax.Softmax(axis=(1, 0))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
mask = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
expected_output = np.array(
[[0.21194154, 0.0, 0.21194154], [0.0, 0.57611686, 0.0]]
)
result = softmax_layer(input, mask=mask)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_axis(self):
softmax_layer = softmax.Softmax(axis=(1))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
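if __name__ == "__main__":
    # Illustrative sketch, not part of the original test file: the expected
    # values above are plain softmax([1, 2, 1]) computed directly with NumPy.
    logits = np.array([1.0, 2.0, 1.0])
    print(np.exp(logits) / np.exp(logits).sum())  # ~[0.2119, 0.5761, 0.2119]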
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import softmax
class SoftmaxTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_softmax(self):
self.run_layer_test(
softmax.Softmax,
init_kwargs={},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_softmax_correctness(self):
softmax_layer = softmax.Softmax()
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_mask(self):
softmax_layer = softmax.Softmax(axis=(1, 0))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
mask = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
expected_output = np.array(
[[0.21194154, 0.0, 0.21194154], [0.0, 0.57611686, 0.0]]
)
result = softmax_layer(input, mask=mask)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_axis(self):
softmax_layer = softmax.Softmax(axis=(1))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.22.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
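# Usage sketch (illustrative, not part of the original module), assuming the
# release-candidate convention handled above.
assert parse_version_info('2.22.0') == (2, 22, 0)
assert parse_version_info('2.22.0rc1') == (2, 22, 0, 'rc1')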
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.21.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from __future__ import annotations
import re
from typing import Optional
from langchain_core.output_parsers import BaseOutputParser
class RegexParser(BaseOutputParser[dict[str, str]]):
"""Parse the output of an LLM call using a regex."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
regex: str
"""The regex to use to parse the output."""
output_keys: list[str]
"""The keys to use for the output."""
default_output_key: Optional[str] = None
"""The default key to use for the output."""
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_parser"
def parse(self, text: str) -> dict[str, str]:
"""Parse the output of an LLM call."""
match = re.search(self.regex, text)
if match:
return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)}
else:
if self.default_output_key is None:
raise ValueError(f"Could not parse output: {text}")
else:
return {
key: text if key == self.default_output_key else ""
for key in self.output_keys
}
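if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): the regex,
    # output keys and sample text below are hypothetical.
    parser = RegexParser(
        regex=r"Score: (\d+)\nReason: (.*)",
        output_keys=["score", "reason"],
    )
    # Capture groups map positionally onto output_keys.
    print(parser.parse("Score: 7\nReason: concise answer"))
    # -> {'score': '7', 'reason': 'concise answer'}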
|
from __future__ import annotations
import re
from typing import Dict, List, Optional
from langchain_core.output_parsers import BaseOutputParser
class RegexParser(BaseOutputParser[Dict[str, str]]):
"""Parse the output of an LLM call using a regex."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
regex: str
"""The regex to use to parse the output."""
output_keys: List[str]
"""The keys to use for the output."""
default_output_key: Optional[str] = None
"""The default key to use for the output."""
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_parser"
def parse(self, text: str) -> Dict[str, str]:
"""Parse the output of an LLM call."""
match = re.search(self.regex, text)
if match:
return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)}
else:
if self.default_output_key is None:
raise ValueError(f"Could not parse output: {text}")
else:
return {
key: text if key == self.default_output_key else ""
for key in self.output_keys
}
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.sgd import SGD
class SGDTest(testing.TestCase):
def test_config(self):
optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = SGD(learning_rate=0.5)
self.assertEqual(len(optimizer.variables), 2)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.build([vars])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, -1.0, -0.5, 3.0], rtol=1e-4, atol=1e-4)
self.assertEqual(len(optimizer.variables), 2)
self.assertEqual(optimizer.variables[0], 1)
self.assertEqual(optimizer.variables[1], 0.5)
def test_invalid_momentum(self):
with self.assertRaisesRegex(
ValueError, "`momentum` must be a float between \\[0, 1\\]."
):
SGD(momentum=-1.0)
with self.assertRaisesRegex(
ValueError, "`momentum` must be a float between \\[0, 1\\]."
):
SGD(momentum=2.0)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = SGD(nesterov=True)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999,
0.9999, 0.9999], [0.9989, 0.9979, 0.9969, 0.9959, 0.9949, 0.9939,
0.9929, 0.9919, 0.9909, 0.9899], [0.9979, 0.9959, 0.9939, 0.9919,
0.9899, 0.9879, 0.9859, 0.9839, 0.9819, 0.9799], [0.9969, 0.9939,
0.9909, 0.9879, 0.9849, 0.9819, 0.9789, 0.9759, 0.9729, 0.9699],
[0.9959, 0.9919, 0.9879, 0.9839, 0.9799, 0.9759, 0.9719, 0.9679,
0.9639, 0.9599]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = SGD(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = SGD(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.sgd import SGD
class SGDTest(testing.TestCase):
def test_config(self):
optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = SGD(learning_rate=0.5)
self.assertEqual(len(optimizer.variables), 2)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.build([vars])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, -1.0, -0.5, 3.0], rtol=1e-4, atol=1e-4)
self.assertEqual(len(optimizer.variables), 2)
self.assertEqual(optimizer.variables[0], 1)
self.assertEqual(optimizer.variables[1], 0.5)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = SGD(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = SGD(nesterov=True)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999,
0.9999, 0.9999], [0.9989, 0.9979, 0.9969, 0.9959, 0.9949, 0.9939,
0.9929, 0.9919, 0.9909, 0.9899], [0.9979, 0.9959, 0.9939, 0.9919,
0.9899, 0.9879, 0.9859, 0.9839, 0.9819, 0.9799], [0.9969, 0.9939,
0.9909, 0.9879, 0.9849, 0.9819, 0.9789, 0.9759, 0.9729, 0.9699],
[0.9959, 0.9919, 0.9879, 0.9839, 0.9799, 0.9759, 0.9719, 0.9679,
0.9639, 0.9599]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = SGD(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = SGD(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
from .conv_emformer import ConvEmformer
from .conv_tasnet import conv_tasnet_base
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conv_tasnet_base",
"ConvEmformer",
]
|
from .conv_emformer import ConvEmformer
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples)
assert isinstance(tensors.points, np.ndarray)
assert isinstance(tensors.points, NdArray)
assert tensors.points.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(tensors.points, np.ndarray)
assert len(tensors.points.shape) == 3
assert tensors.points.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
@pytest.mark.parametrize(
'file_type, file_source',
[
(OBJ_MIMETYPE, MESH_FILES['obj']),
(OBJ_MIMETYPE, MESH_FILES['glb']),
(OBJ_MIMETYPE, MESH_FILES['ply']),
(OBJ_MIMETYPE, REMOTE_OBJ_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(VIDEO_MIMETYPE, os.path.join(TOYDATA_DIR, 'mov_bbb.mp4')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.html')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
],
)
def test_file_validation(file_type, file_source):
if file_type != PointCloud3DUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(PointCloud3DUrl, file_source)
else:
parse_obj_as(PointCloud3DUrl, file_source)
|
import os
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples)
assert isinstance(tensors.points, np.ndarray)
assert isinstance(tensors.points, NdArray)
assert tensors.points.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(tensors.points, np.ndarray)
assert len(tensors.points.shape) == 3
assert tensors.points.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
@pytest.mark.parametrize(
'file_type, file_source',
[
(OBJ_MIMETYPE, MESH_FILES['obj']),
(OBJ_MIMETYPE, MESH_FILES['glb']),
(OBJ_MIMETYPE, MESH_FILES['ply']),
(OBJ_MIMETYPE, REMOTE_OBJ_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(VIDEO_MIMETYPE, os.path.join(TOYDATA_DIR, 'mov_bbb.mp4')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.html')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
],
)
def test_file_validation(file_type, file_source):
if file_type != PointCloud3DUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(PointCloud3DUrl, file_source)
else:
parse_obj_as(PointCloud3DUrl, file_source)
|
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar, Union
import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
    URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
f'Video URL must have one of the following extensions:'
f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a named Tuple of VideoNdArray, AudioNdArray and
NdArray.
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: AudioNdArray representing the audio content, VideoNdArray representing
the images of the video, NdArray of the key frame indices.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDocument):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
You can load only the key frames (or video, audio respectively):
.. code-block:: python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
"""
import av
with av.open(self, **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar, Union
import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing import AudioNdArray, NdArray
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
    URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
f'Video URL must have one of the following extensions:'
f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a named Tuple of VideoNdArray, AudioNdArray and
NdArray.
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: AudioNdArray representing the audio content, VideoNdArray representing
the images of the video, NdArray of the key frame indices.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDocument):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
You can load only the key frames (or video, audio respectively):
.. code-block:: python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
"""
import av
with av.open(self, **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
from docutils import nodes
from docutils.parsers.rst import Directive
class BetaStatus(Directive):
has_content = True
text = "The {api_name} is in Beta stage, and backward compatibility is not guaranteed."
node = nodes.warning
def run(self):
text = self.text.format(api_name=" ".join(self.content))
return [self.node("", nodes.paragraph("", "", nodes.Text(text)))]
class V2BetaStatus(BetaStatus):
text = (
"The {api_name} is in Beta stage, and while we do not expect disruptive breaking changes, "
"some APIs may slightly change according to user feedback. Please submit any feedback you may have "
"in this issue: https://github.com/pytorch/vision/issues/6753."
)
node = nodes.note
def setup(app):
app.add_directive("betastatus", BetaStatus)
app.add_directive("v2betastatus", V2BetaStatus)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
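# Usage sketch (illustrative, not part of the original extension): in an .rst page,
#     .. betastatus:: Video API
# would render a warning reading
# "The Video API is in Beta stage, and backward compatibility is not guaranteed."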
|
from docutils import nodes
from docutils.parsers.rst import Directive
class BetaStatus(Directive):
has_content = True
text = "The {api_name} is in Beta stage, and backward compatibility is not guaranteed."
def run(self):
text = self.text.format(api_name=" ".join(self.content))
return [nodes.warning("", nodes.paragraph("", "", nodes.Text(text)))]
class V2BetaStatus(BetaStatus):
text = (
"The {api_name} is in Beta stage, and while we do not expect major breaking changes, "
"some APIs may still change according to user feedback. Please submit any feedback you may have "
"in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check "
"out https://github.com/pytorch/vision/issues/7319 to learn "
"more about the APIs that we suspect might involve future changes."
)
def setup(app):
app.add_directive("betastatus", BetaStatus)
app.add_directive("v2betastatus", V2BetaStatus)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.data_elements.bbox import bbox_cxcywh_to_xyxy
from mmdet.registry import MODELS
@MODELS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
Unlike traditional RPNHead, this module does not need FPN input, but just
decode `init_proposal_bboxes` and expand the first dimension of
`init_proposal_bboxes` and `init_proposal_features` to the batch_size.
Args:
num_proposals (int): Number of init_proposals. Default 100.
proposal_feature_channel (int): Channel number of
init_proposal_feature. Defaults to 256.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_proposals=100,
proposal_feature_channel=256,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(EmbeddingRPNHead, self).__init__(init_cfg)
self.num_proposals = num_proposals
self.proposal_feature_channel = proposal_feature_channel
self._init_layers()
def _init_layers(self):
"""Initialize a sparse set of proposal boxes and proposal features."""
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(
self.num_proposals, self.proposal_feature_channel)
def init_weights(self):
"""Initialize the init_proposal_bboxes as normalized.
[c_x, c_y, w, h], and we initialize it to the size of the entire
image.
"""
super(EmbeddingRPNHead, self).init_weights()
nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
def _decode_init_proposals(self, imgs, img_metas):
"""Decode init_proposal_bboxes according to the size of images and
expand dimension of init_proposal_features to batch_size.
Args:
imgs (list[Tensor]): List of FPN features.
img_metas (list[dict]): List of meta-information of
images. Need the img_shape to decode the init_proposals.
Returns:
Tuple(Tensor):
- proposals (Tensor): Decoded proposal bboxes,
has shape (batch_size, num_proposals, 4).
- init_proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel).
            - imgs_whwh (Tensor): Tensor with shape
                (batch_size, 1, 4), the last dimension means
                [img_width, img_height, img_width, img_height].
"""
proposals = self.init_proposal_bboxes.weight.clone()
proposals = bbox_cxcywh_to_xyxy(proposals)
num_imgs = len(imgs[0])
imgs_whwh = []
for meta in img_metas:
h, w, _ = meta['img_shape']
imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
imgs_whwh = torch.cat(imgs_whwh, dim=0)
imgs_whwh = imgs_whwh[:, None, :]
# imgs_whwh has shape (batch_size, 1, 4)
# The shape of proposals change from (num_proposals, 4)
# to (batch_size ,num_proposals, 4)
proposals = proposals * imgs_whwh
init_proposal_features = self.init_proposal_features.weight.clone()
init_proposal_features = init_proposal_features[None].expand(
num_imgs, *init_proposal_features.size())
return proposals, init_proposal_features, imgs_whwh
def forward_dummy(self, img, img_metas):
"""Dummy forward function.
Used in flops calculation.
"""
return self._decode_init_proposals(img, img_metas)
def forward_train(self, img, img_metas):
"""Forward function in training stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test_rpn(self, img, img_metas):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test(self, img, img_metas):
"""Forward function in testing stage."""
raise NotImplementedError
def aug_test_rpn(self, feats, img_metas):
raise NotImplementedError(
'EmbeddingRPNHead does not support test-time augmentation')
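# Shape sketch (illustrative, not part of the original module): with batch size B,
# N = num_proposals and C = proposal_feature_channel, _decode_init_proposals returns
#   proposals:              (B, N, 4)  absolute xyxy boxes scaled by each image's size
#   init_proposal_features: (B, N, C)  the learned embeddings expanded per image
#   imgs_whwh:              (B, 1, 4)  per-image [img_w, img_h, img_w, img_h]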
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
from ...core import bbox_cxcywh_to_xyxy
@MODELS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
Unlike traditional RPNHead, this module does not need FPN input, but just
decode `init_proposal_bboxes` and expand the first dimension of
`init_proposal_bboxes` and `init_proposal_features` to the batch_size.
Args:
num_proposals (int): Number of init_proposals. Default 100.
proposal_feature_channel (int): Channel number of
init_proposal_feature. Defaults to 256.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_proposals=100,
proposal_feature_channel=256,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(EmbeddingRPNHead, self).__init__(init_cfg)
self.num_proposals = num_proposals
self.proposal_feature_channel = proposal_feature_channel
self._init_layers()
def _init_layers(self):
"""Initialize a sparse set of proposal boxes and proposal features."""
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(
self.num_proposals, self.proposal_feature_channel)
def init_weights(self):
"""Initialize the init_proposal_bboxes as normalized.
[c_x, c_y, w, h], and we initialize it to the size of the entire
image.
"""
super(EmbeddingRPNHead, self).init_weights()
nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
def _decode_init_proposals(self, imgs, img_metas):
"""Decode init_proposal_bboxes according to the size of images and
expand dimension of init_proposal_features to batch_size.
Args:
imgs (list[Tensor]): List of FPN features.
img_metas (list[dict]): List of meta-information of
images. Need the img_shape to decode the init_proposals.
Returns:
Tuple(Tensor):
- proposals (Tensor): Decoded proposal bboxes,
has shape (batch_size, num_proposals, 4).
- init_proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel).
            - imgs_whwh (Tensor): Tensor with shape
                (batch_size, 1, 4), the last dimension means
                [img_width, img_height, img_width, img_height].
"""
proposals = self.init_proposal_bboxes.weight.clone()
proposals = bbox_cxcywh_to_xyxy(proposals)
num_imgs = len(imgs[0])
imgs_whwh = []
for meta in img_metas:
h, w, _ = meta['img_shape']
imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
imgs_whwh = torch.cat(imgs_whwh, dim=0)
imgs_whwh = imgs_whwh[:, None, :]
# imgs_whwh has shape (batch_size, 1, 4)
# The shape of proposals change from (num_proposals, 4)
# to (batch_size ,num_proposals, 4)
proposals = proposals * imgs_whwh
init_proposal_features = self.init_proposal_features.weight.clone()
init_proposal_features = init_proposal_features[None].expand(
num_imgs, *init_proposal_features.size())
return proposals, init_proposal_features, imgs_whwh
def forward_dummy(self, img, img_metas):
"""Dummy forward function.
Used in flops calculation.
"""
return self._decode_init_proposals(img, img_metas)
def forward_train(self, img, img_metas):
"""Forward function in training stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test_rpn(self, img, img_metas):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test(self, img, img_metas):
"""Forward function in testing stage."""
raise NotImplementedError
def aug_test_rpn(self, feats, img_metas):
raise NotImplementedError(
'EmbeddingRPNHead does not support test-time augmentation')
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version
if torch.__version__ == 'parrots':
TORCH_VERSION = torch.__version__
else:
# torch.__version__ could be 1.3.1+cu92, we only need the first two
# for comparison
TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
def adaptive_avg_pool2d(input, output_size):
"""Handle empty batch dimension to adaptive_avg_pool2d.
Args:
input (tensor): 4D tensor.
output_size (int, tuple[int,int]): the target output size.
"""
if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
if isinstance(output_size, int):
output_size = [output_size, output_size]
output_size = [*input.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(input, output_size)
return empty
else:
return F.adaptive_avg_pool2d(input, output_size)
class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
"""Handle empty batch dimension to AdaptiveAvgPool2d."""
def forward(self, x):
# PyTorch 1.9 does not support empty tensor inference yet
if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
output_size = self.output_size
if isinstance(output_size, int):
output_size = [output_size, output_size]
else:
output_size = [
v if v is not None else d
for v, d in zip(output_size,
x.size()[-2:])
]
output_size = [*x.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(x, output_size)
return empty
return super().forward(x)
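if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: with an empty batch the
    # wrapper still produces the requested spatial size instead of raising.
    x = torch.zeros((0, 16, 14, 14))
    pooled = AdaptiveAvgPool2d((7, 7))(x)
    assert pooled.shape == (0, 16, 7, 7)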
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version
if torch.__version__ == 'parrots':
TORCH_VERSION = torch.__version__
else:
# torch.__version__ could be 1.3.1+cu92, we only need the first two
# for comparison
TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
def adaptive_avg_pool2d(input, output_size):
"""Handle empty batch dimension to adaptive_avg_pool2d.
Args:
input (tensor): 4D tensor.
output_size (int, tuple[int,int]): the target output size.
"""
if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
if isinstance(output_size, int):
output_size = [output_size, output_size]
output_size = [*input.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(input, output_size)
return empty
else:
return F.adaptive_avg_pool2d(input, output_size)
class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
"""Handle empty batch dimension to AdaptiveAvgPool2d."""
def forward(self, x):
# PyTorch 1.9 does not support empty tensor inference yet
if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
output_size = self.output_size
if isinstance(output_size, int):
output_size = [output_size, output_size]
else:
output_size = [
v if v is not None else d
for v, d in zip(output_size,
x.size()[-2:])
]
output_size = [*x.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(x, output_size)
return empty
return super().forward(x)
|
import numpy as np
from docarray import Document, DocumentArray, Image, Text
from docarray.typing import NdArray
def test_simple_proto():
class CustomDoc(Document):
text: str
tensor: NdArray
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
def test_nested_proto():
class CustomDocument(Document):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
def test_nested_proto_any_doc():
class CustomDocument(Document):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
|
import numpy as np
from docarray import DocumentArray, Document, Image, Text
from docarray.typing import Tensor
def test_simple_proto():
class CustomDoc(Document):
text: str
tensor: Tensor
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
def test_nested_proto():
class CustomDocument(Document):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
def test_nested_proto_any_doc():
class CustomDocument(Document):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
|
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
|
from typing import Any, Dict, List, Optional, Union
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
http_get,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetsServerError(DatasetsError):
"""Dataset-server error.
Raised when trying to use the Datasets-server HTTP API to access:
- a missing dataset,
- a private/gated dataset when the user is not authenticated, or
- unavailable /parquet or /info responses.
"""
def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
datasets_server_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = http_get(
url=datasets_server_parquet_url + dataset,
temp_file=None,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
max_retries=3,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the datasets-server and consider the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetsServerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, revision: str, token: Optional[Union[str, bool]]
) -> Dict[str, Dict[str, Any]]:
"""
Get the dataset information; this can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
datasets_server_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = http_get(
url=datasets_server_info_url + dataset,
temp_file=None,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
max_retries=3,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == revision or revision is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the datasets-server and consider the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetsServerError("No exported dataset infos available.")
|
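A hedged usage sketch of the helpers above; these are internal utilities of the `datasets` library, so the call site is illustrative only (the dataset id "squad" is just an example), assuming the module above is in scope:

try:
    # revision=None accepts whatever revision the server currently exposes.
    parquet_files = get_exported_parquet_files("squad", revision=None, token=None)
    print(f"{len(parquet_files)} exported parquet shards")
except DatasetsServerError:
    print("No parquet export available for this dataset/revision.")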
from typing import Any, Dict, List
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
http_get,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetsServerError(DatasetsError):
"""Dataset-server error.
Raised when trying to use the Datasets-server HTTP API to access:
- a missing dataset,
- a private/gated dataset when the user is not authenticated, or
- unavailable /parquet or /info responses.
"""
def get_exported_parquet_files(dataset: str, revision: str) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
datasets_server_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = http_get(
url=datasets_server_parquet_url + dataset,
temp_file=None,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}"),
timeout=100.0,
max_retries=3,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the datasets-server and consider the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetsServerError("No exported Parquet files available.")
def get_exported_dataset_infos(dataset: str, revision: str) -> Dict[str, Dict[str, Any]]:
"""
Get the dataset information; this can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
datasets_server_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = http_get(
url=datasets_server_info_url + dataset,
temp_file=None,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}"),
timeout=100.0,
max_retries=3,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == revision or revision is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the datasets-server and consider the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetsServerError("No exported dataset infos available.")
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GmailGetMessage
from langchain_community.tools.gmail.get_message import SearchArgsSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearchArgsSchema": "langchain_community.tools.gmail.get_message",
"GmailGetMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GmailGetMessage",
"SearchArgsSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GmailGetMessage
from langchain_community.tools.gmail.get_message import SearchArgsSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearchArgsSchema": "langchain_community.tools.gmail.get_message",
"GmailGetMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SearchArgsSchema",
"GmailGetMessage",
]
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
|
from typing import List
import pytest
from sqlalchemy import create_engine, text
from llama_index.readers.database import DatabaseReader
from llama_index.core.schema import Document
# --------------------------------------------------------------------------- #
# Fixtures
# --------------------------------------------------------------------------- #
@pytest.fixture(scope="module")
def sqlite_engine():
"""
Return an in-memory SQLite engine using a URI that allows sharing
the database across connections within the same process.
"""
# This URI creates a named in-memory database that persists
# as long as at least one connection is open, and is shareable.
db_uri = (
"sqlite:///file:llamaindex_reader_test_db?mode=memory&cache=shared&uri=true"
)
engine = create_engine(db_uri, future=True)
# Set up schema + sample data (ensure clean state first)
with engine.begin() as conn:
# Drop table if it exists from a previous potentially failed run
conn.execute(text("DROP TABLE IF EXISTS items"))
# Create table (no schema prefix)
conn.execute(
text(
"""
CREATE TABLE items (
id INTEGER PRIMARY KEY,
name TEXT,
value INTEGER
)
"""
)
)
# Insert data (no schema prefix)
conn.execute(
text(
"""
INSERT INTO items (name, value)
VALUES ('foo', 10), ('bar', 20)
"""
)
)
# The engine is now configured with a persistent in-memory DB
# containing the 'items' table.
return engine
# Optional teardown: dispose engine if needed, though usually not required
# engine.dispose()
# --------------------------------------------------------------------------- #
# Helpers
# --------------------------------------------------------------------------- #
def _create_reader(engine):
"""Utility to build a DatabaseReader for the given engine."""
return DatabaseReader(engine=engine)
def _get_all_docs(reader: DatabaseReader, **kwargs) -> List[Document]:
"""Convenience wrapper that returns a list of Documents."""
return reader.load_data(
query="SELECT id, name, value FROM items ORDER BY id",
**kwargs,
)
# --------------------------------------------------------------------------- #
# Tests
# --------------------------------------------------------------------------- #
def test_load_data_basic(sqlite_engine):
"""It should return two Document objects with concatenated text."""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(reader)
assert len(docs) == 2
assert docs[0].text_resource and docs[0].text_resource.text
assert docs[0].text_resource.text.startswith("id: 1")
assert docs[0].text_resource.text.endswith("value: 10")
def test_metadata_and_exclusion(sqlite_engine):
"""
`metadata_cols` should be promoted to metadata and
`excluded_text_cols` should remove columns from text.
"""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(
reader,
metadata_cols=[("id", "item_id"), "value"],
excluded_text_cols=["value"],
)
doc = docs[0]
# `value` excluded from text, included as metadata
assert "value:" not in doc.text
assert doc.metadata == {"item_id": 1, "value": 10}
def test_resource_id_fn(sqlite_engine):
"""Custom `document_id` should drive `doc_id`."""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(
reader,
document_id=lambda row: f"custom-{row['id']}",
)
assert docs[0].id_ == "custom-1"
assert docs[1].id_ == "custom-2"
def test_lazy_load_data_generator(sqlite_engine):
"""`lazy_load_data` should yield Documents lazily."""
reader = _create_reader(sqlite_engine)
gen = reader.lazy_load_data(query="SELECT * FROM items")
docs = list(gen)
assert len(docs) == 2
assert all(hasattr(d, "text_resource") for d in docs)
assert all(hasattr(d.text_resource, "text") for d in docs)
@pytest.mark.asyncio()
async def test_aload_data_async(sqlite_engine):
"""`aload_data` wraps the sync loader via asyncio.to_thread()."""
reader = _create_reader(sqlite_engine)
docs = await reader.aload_data(query="SELECT * FROM items")
assert len(docs) == 2
assert docs[0].text_resource and docs[0].text_resource.text
assert docs[0].text_resource.text.startswith("id: 1")
|
from llama_index.core.readers.base import BaseReader
from llama_index.readers.database import DatabaseReader
def test_class():
names_of_base_classes = [b.__name__ for b in DatabaseReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
|
_base_ = './solo_r50_fpn_8xb8-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './solo_r50_fpn_lsj_200e_8x8_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_inferencer import DetInferencer
from .inference import (async_inference_detector, inference_detector,
inference_mot, init_detector, init_track_model)
__all__ = [
'init_detector', 'async_inference_detector', 'inference_detector',
'DetInferencer', 'inference_mot', 'init_track_model'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_inferencer import DetInferencer
from .inference import (async_inference_detector, inference_detector,
init_detector)
__all__ = [
'init_detector', 'async_inference_detector', 'inference_detector',
'DetInferencer'
]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=0.22.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=0.22.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline), persistent_workers=True)
optimizer = dict(
_delete_=True,
constructor='LearningRateDecayOptimizerConstructor',
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
})
lr_config = dict(warmup_iters=1000, step=[27, 33])
runner = dict(max_epochs=36)
# you need to set mode='dynamic' if you are using pytorch<=1.5.0
fp16 = dict(loss_scale=dict(init_scale=512))
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
This function will perform the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, List, Optional, Sequence, Tuple
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
This function will perform the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
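A minimal sketch of the bare training-step sequence that `OptimizerHook.after_train_iter` wraps; the model, optimizer and `max_norm` below are illustrative:

import torch
from torch.nn.utils import clip_grad

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss = model(torch.randn(8, 4)).sum()

optimizer.zero_grad()
loss.backward()
# Mirrors clip_grads() when a grad_clip config such as dict(max_norm=1.0) is given.
grad_norm = clip_grad.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
print(float(grad_norm))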
"""Init file."""
from llama_index.readers.openalex.base import OpenAlexReader
__all__ = ["OpenAlexReader"]
|
"""Init file."""
from llama_index.readers.openalex.base import OpenAlexReader
__all__ = ["OpenAlexReader"]
|
"""
Successive Halving Iterations
=============================
This example illustrates how a successive halving search
(:class:`~sklearn.model_selection.HalvingGridSearchCV` and
:class:`~sklearn.model_selection.HalvingRandomSearchCV`)
iteratively chooses the best parameter combination out of
multiple candidates.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import randint
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
from sklearn.experimental import enable_halving_search_cv # noqa: F401
from sklearn.model_selection import HalvingRandomSearchCV
# %%
# We first define the parameter space and train a
# :class:`~sklearn.model_selection.HalvingRandomSearchCV` instance.
rng = np.random.RandomState(0)
X, y = datasets.make_classification(n_samples=400, n_features=12, random_state=rng)
clf = RandomForestClassifier(n_estimators=20, random_state=rng)
param_dist = {
"max_depth": [3, None],
"max_features": randint(1, 6),
"min_samples_split": randint(2, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
}
rsh = HalvingRandomSearchCV(
estimator=clf, param_distributions=param_dist, factor=2, random_state=rng
)
rsh.fit(X, y)
# %%
# We can now use the `cv_results_` attribute of the search estimator to inspect
# and plot the evolution of the search.
results = pd.DataFrame(rsh.cv_results_)
results["params_str"] = results.params.apply(str)
results.drop_duplicates(subset=("params_str", "iter"), inplace=True)
mean_scores = results.pivot(
index="iter", columns="params_str", values="mean_test_score"
)
ax = mean_scores.plot(legend=False, alpha=0.6)
labels = [
f"iter={i}\nn_samples={rsh.n_resources_[i]}\nn_candidates={rsh.n_candidates_[i]}"
for i in range(rsh.n_iterations_)
]
ax.set_xticks(range(rsh.n_iterations_))
ax.set_xticklabels(labels, rotation=45, multialignment="left")
ax.set_title("Scores of candidates over iterations")
ax.set_ylabel("mean test score", fontsize=15)
ax.set_xlabel("iterations", fontsize=15)
plt.tight_layout()
plt.show()
# %%
# Number of candidates and amount of resource at each iteration
# -------------------------------------------------------------
#
# At the first iteration, a small amount of resources is used. The resource
# here is the number of samples that the estimators are trained on. All
# candidates are evaluated.
#
# At the second iteration, only the best half of the candidates is evaluated.
# The number of allocated resources is doubled: candidates are evaluated on
# twice as many samples.
#
# This process is repeated until the last iteration, where only 2 candidates
# are left. The best candidate is the candidate that has the best score at the
# last iteration.
|
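A back-of-the-envelope sketch of the schedule described in the closing comments above, for factor=2; the starting numbers are assumed for illustration, while the real values are exposed via `rsh.n_candidates_` and `rsh.n_resources_`:

factor = 2
n_candidates, n_resources = 32, 20  # assumed starting point, for illustration only
while n_candidates > 1:
    print(f"{n_candidates:>3} candidates evaluated on {n_resources:>4} samples each")
    n_candidates = max(1, n_candidates // factor)  # keep roughly the best half
    n_resources *= factor                          # double the per-candidate budget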
"""
Successive Halving Iterations
=============================
This example illustrates how a successive halving search
(:class:`~sklearn.model_selection.HalvingGridSearchCV` and
:class:`~sklearn.model_selection.HalvingRandomSearchCV`)
iteratively chooses the best parameter combination out of
multiple candidates.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import randint
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import HalvingRandomSearchCV
# %%
# We first define the parameter space and train a
# :class:`~sklearn.model_selection.HalvingRandomSearchCV` instance.
rng = np.random.RandomState(0)
X, y = datasets.make_classification(n_samples=400, n_features=12, random_state=rng)
clf = RandomForestClassifier(n_estimators=20, random_state=rng)
param_dist = {
"max_depth": [3, None],
"max_features": randint(1, 6),
"min_samples_split": randint(2, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
}
rsh = HalvingRandomSearchCV(
estimator=clf, param_distributions=param_dist, factor=2, random_state=rng
)
rsh.fit(X, y)
# %%
# We can now use the `cv_results_` attribute of the search estimator to inspect
# and plot the evolution of the search.
results = pd.DataFrame(rsh.cv_results_)
results["params_str"] = results.params.apply(str)
results.drop_duplicates(subset=("params_str", "iter"), inplace=True)
mean_scores = results.pivot(
index="iter", columns="params_str", values="mean_test_score"
)
ax = mean_scores.plot(legend=False, alpha=0.6)
labels = [
f"iter={i}\nn_samples={rsh.n_resources_[i]}\nn_candidates={rsh.n_candidates_[i]}"
for i in range(rsh.n_iterations_)
]
ax.set_xticks(range(rsh.n_iterations_))
ax.set_xticklabels(labels, rotation=45, multialignment="left")
ax.set_title("Scores of candidates over iterations")
ax.set_ylabel("mean test score", fontsize=15)
ax.set_xlabel("iterations", fontsize=15)
plt.tight_layout()
plt.show()
# %%
# Number of candidates and amount of resource at each iteration
# -------------------------------------------------------------
#
# At the first iteration, a small amount of resources is used. The resource
# here is the number of samples that the estimators are trained on. All
# candidates are evaluated.
#
# At the second iteration, only the best half of the candidates is evaluated.
# The number of allocated resources is doubled: candidates are evaluated on
# twice as many samples.
#
# This process is repeated until the last iteration, where only 2 candidates
# are left. The best candidate is the candidate that has the best score at the
# last iteration.
|
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2.utils import is_simple_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird-looking construct only exists because lambdas cannot be serialized by pickle.
# If they could be, we could replace this with `defaultdict(lambda: default)`.
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
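A small illustration of the pickling constraint mentioned in the `_get_defaultdict` comment above: a `functools.partial` of a module-level function can be pickled, while a lambda default factory cannot (the tuple default is illustrative):

import functools
import pickle
from collections import defaultdict

def _default_arg(value):
    return value

picklable = defaultdict(functools.partial(_default_arg, (0, 2, 1)))
pickle.dumps(picklable)  # fine

try:
    pickle.dumps(defaultdict(lambda: (0, 2, 1)))
except Exception as err:
    print(type(err).__name__)  # lambdas cannot be pickled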
import warnings
from typing import Any, Dict, Optional, Sequence, Tuple, Type, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import _get_defaultdict
from torchvision.transforms.v2.utils import is_simple_tensor
class PermuteDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLOFHead
def test_yolof_head_loss():
"""Tests yolof head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='UniformAssigner',
pos_ignore_thr=0.15,
neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = YOLOFHead(
num_classes=4,
in_channels=1,
reg_decoded_bbox=True,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
feat = [torch.rand(1, 1, s // 32, s // 32)]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
import mmcv
import torch
from mmdet.models.dense_heads import YOLOFHead
def test_yolof_head_loss():
"""Tests yolof head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='UniformAssigner',
pos_ignore_thr=0.15,
neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = YOLOFHead(
num_classes=4,
in_channels=1,
reg_decoded_bbox=True,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
feat = [torch.rand(1, 1, s // 32, s // 32)]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
In distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.train_loop.dataloader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
runner.train_loop.dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.train_loop.dataloader.batch_sampler.sampler,
'set_epoch'):
# The batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.train_loop.dataloader.batch_sampler.sampler.set_epoch(
runner.epoch)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
In distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.cur_dataloader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
runner.cur_dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.cur_dataloader.batch_sampler.sampler, 'set_epoch'):
# The batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.cur_dataloader.batch_sampler.sampler.set_epoch(runner.epoch)
|
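A minimal sketch of what the hook above automates, using a single-replica `DistributedSampler` purely for illustration (no process group is needed when `num_replicas` and `rank` are given explicitly):

import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

dataset = TensorDataset(torch.arange(8).float())
sampler = DistributedSampler(dataset, num_replicas=1, rank=0, shuffle=True)
loader = DataLoader(dataset, sampler=sampler, batch_size=2)
for epoch in range(2):
    sampler.set_epoch(epoch)  # the call DistSamplerSeedHook issues once per epoch
    print([batch[0].tolist() for batch in loader])  # shuffling differs across epochs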
import abc
from typing import BinaryIO, Optional, Type, TypeVar, Union
import numpy as np
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
T = TypeVar('T', bound='AbstractTensor')
class VideoTensorMixin(AbstractTensor, abc.ABC):
@classmethod
def validate_shape(cls: Type['T'], value: 'T') -> 'T':
comp_be = cls.get_comp_backend()
shape = comp_be.shape(value) # type: ignore
if comp_be.n_dim(value) not in [3, 4] or shape[-1] != 3: # type: ignore
raise ValueError(
f'Expects tensor with 3 or 4 dimensions and the last dimension equal '
f'to 3, but received {shape}.'
)
else:
return value
def save(
self: 'T',
file_path: Union[str, BinaryIO],
audio_tensor: Optional[AudioTensor] = None,
video_frame_rate: int = 24,
video_codec: str = 'h264',
audio_frame_rate: int = 48000,
audio_codec: str = 'aac',
audio_format: str = 'fltp',
) -> None:
"""
Save video tensor to a .mp4 file.
:param file_path: path to a .mp4 file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param audio_tensor: AudioTensor containing the video's soundtrack.
:param video_frame_rate: video frames per second.
:param video_codec: the name of a video decoder/encoder.
:param audio_frame_rate: audio frames per second.
:param audio_codec: the name of an audio decoder/encoder.
:param audio_format: the name of one of the audio formats supported by PyAV,
such as 'flt', 'fltp', 's16' or 's16p'.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
class MyDoc(BaseDocument):
video_tensor: VideoTensor
audio_tensor: AudioTensor
doc = MyDoc(
video_tensor=np.random.randint(low=0, high=256, size=(10, 200, 300, 3)),
audio_tensor=np.random.randn(100, 1, 1024).astype("float32"),
)
doc.video_tensor.save(
file_path="toydata/mp_.mp4",
audio_tensor=doc.audio_tensor,
audio_format="flt",
)
"""
import av
np_tensor = self.get_comp_backend().to_numpy(array=self)
video_tensor = np_tensor.astype('uint8')
with av.open(file_path, mode='w') as container:
if video_tensor.ndim == 3:
video_tensor = np.expand_dims(video_tensor, axis=0)
stream_video = container.add_stream(video_codec, rate=video_frame_rate)
stream_video.height = video_tensor.shape[-3]
stream_video.width = video_tensor.shape[-2]
if audio_tensor is not None:
stream_audio = container.add_stream(audio_codec)
audio_np = audio_tensor.get_comp_backend().to_numpy(array=audio_tensor)
audio_layout = 'stereo' if audio_np.shape[-2] == 2 else 'mono'
for i, audio in enumerate(audio_np):
frame = av.AudioFrame.from_ndarray(
array=audio, format=audio_format, layout=audio_layout
)
frame.rate = audio_frame_rate
frame.pts = audio.shape[-1] * i
for packet in stream_audio.encode(frame):
container.mux(packet)
for packet in stream_audio.encode(None):
container.mux(packet)
for vid in video_tensor:
frame = av.VideoFrame.from_ndarray(vid, format='rgb24')
for packet in stream_video.encode(frame):
container.mux(packet)
for packet in stream_video.encode(None):
container.mux(packet)
|
import abc
from typing import BinaryIO, Optional, Type, TypeVar, Union
import numpy as np
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
T = TypeVar('T', bound='AbstractTensor')
class VideoTensorMixin(AbstractTensor, abc.ABC):
@classmethod
def validate_shape(cls: Type['T'], value: 'T') -> 'T':
comp_be = cls.get_comp_backend()
shape = comp_be.shape(value) # type: ignore
if comp_be.n_dim(value) not in [3, 4] or shape[-1] != 3: # type: ignore
raise ValueError(
f'Expects tensor with 3 or 4 dimensions and the last dimension equal '
f'to 3, but received {shape}.'
)
else:
return value
def save(
self: 'T',
file_path: Union[str, BinaryIO],
audio_tensor: Optional[AudioTensor] = None,
video_frame_rate: int = 24,
video_codec: str = 'h264',
audio_frame_rate: int = 48000,
audio_codec: str = 'aac',
audio_format: str = 'fltp',
) -> None:
"""
Save video tensor to a .mp4 file.
:param file_path: path to a .mp4 file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param audio_tensor: AudioTensor containing the video's soundtrack.
:param video_frame_rate: video frames per second.
:param video_codec: the name of a video decoder/encoder.
:param audio_frame_rate: audio frames per second.
:param audio_codec: the name of an audio decoder/encoder.
:param audio_format: the name of one of the audio formats supported by PyAV,
such as 'flt', 'fltp', 's16' or 's16p'.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
class MyDoc(BaseDocument):
video_tensor: VideoTensor
audio_tensor: AudioTensor
doc = MyDoc(
video_tensor=np.random.randint(low=0, high=256, size=(10, 200, 300, 3)),
audio_tensor=np.random.randn(100, 1, 1024).astype("float32"),
)
doc.video_tensor.save(
file_path="toydata/mp_.mp4",
audio_tensor=doc.audio_tensor,
audio_format="flt",
)
"""
import av
np_tensor = self.get_comp_backend().to_numpy(array=self) # type: ignore
video_tensor = np_tensor.astype('uint8')
with av.open(file_path, mode='w') as container:
if video_tensor.ndim == 3:
video_tensor = np.expand_dims(video_tensor, axis=0)
stream_video = container.add_stream(video_codec, rate=video_frame_rate)
stream_video.height = video_tensor.shape[-3]
stream_video.width = video_tensor.shape[-2]
if audio_tensor is not None:
stream_audio = container.add_stream(audio_codec)
audio_np = audio_tensor.get_comp_backend().to_numpy(array=audio_tensor)
audio_layout = 'stereo' if audio_np.shape[-2] == 2 else 'mono'
for i, audio in enumerate(audio_np):
frame = av.AudioFrame.from_ndarray(
array=audio, format=audio_format, layout=audio_layout
)
frame.rate = audio_frame_rate
frame.pts = audio.shape[-1] * i
for packet in stream_audio.encode(frame):
container.mux(packet)
for packet in stream_audio.encode(None):
container.mux(packet)
for vid in video_tensor:
frame = av.VideoFrame.from_ndarray(vid, format='rgb24')
for packet in stream_video.encode(frame):
container.mux(packet)
for packet in stream_video.encode(None):
container.mux(packet)
|
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: Tuple[str, Any]) -> str:
path = Path(data[0])
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
|
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import EncodedImage, Label
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: Tuple[str, Any]) -> str:
path = Path(data[0])
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import MAP_TO_NONE
from keras.src.tree.tree_api import assert_same_paths
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import flatten_with_path
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import traverse
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import assert_same_paths
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import flatten_with_path
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import traverse
|
"""Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(
self,
id: str, # noqa: A002
**kwargs: Any,
) -> None:
"""Create a RemoveMessage.
Args:
id: The ID of the message to remove.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
super().__init__("", id=id, **kwargs)
|
"""Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(self, id: str, **kwargs: Any) -> None:
"""Create a RemoveMessage.
Args:
id: The ID of the message to remove.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
super().__init__("", id=id, **kwargs)
|
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "moto-mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
import os
import boto3
import fsspec
import pytest
from moto import mock_s3
from datasets.filesystems import (
COMPRESSION_FILESYSTEMS,
HfFileSystem,
S3FileSystem,
extract_path_from_uri,
is_remote_filesystem,
)
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "fake_access_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "fake_secret_key"
os.environ["AWS_SECURITY_TOKEN"] = "fake_secrurity_token"
os.environ["AWS_SESSION_TOKEN"] = "fake_session_token"
@pytest.fixture(scope="function")
def s3(aws_credentials):
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
def test_extract_path_from_uri(s3):
mock_bucket = "moto-mock-s3-bucket"
# We need to create the bucket since this is all in Moto's 'virtual' AWS account
s3.create_bucket(Bucket=mock_bucket)
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem():
fs = S3FileSystem(key="fake_access_key", secret="fake_secret")
is_remote = is_remote_filesystem(fs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
class TestSingleStageDetector(TestCase):
@parameterized.expand([
'retinanet/retinanet_r18_fpn_1x_coco.py',
# 'centernet/centernet_resnet18_140e_coco.py',
# 'fsaf/fsaf_r50_fpn_1x_coco.py', 'yolox/yolox_tiny_8x8_300e_coco.py',
# 'yolo/yolov3_mobilenetv2_320_300e_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
assert isinstance(losses, dict)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
# ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
# ('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
# ('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
assert isinstance(batch_results, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestSingleStageDetector(TestCase):
@parameterized.expand([
'retinanet/retinanet_r18_fpn_1x_coco.py',
'centernet/centernet_resnet18_140e_coco.py',
'fsaf/fsaf_r50_fpn_1x_coco.py', 'yolox/yolox_tiny_8x8_300e_coco.py',
'yolo/yolov3_mobilenetv2_320_300e_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
assert detector.device.type == 'cpu'
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('yolox/yolox_tiny_8x8_300e_coco.py', ('cpu', 'cuda')),
('yolo/yolov3_mobilenetv2_320_300e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco.py']
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Normalize the embeddings to unit length
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Normalize the embeddings to unit length
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
import imghdr
import os
import struct
import pytest
from jina import Executor, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
def test_visualization_with_yml_file_img(tmpdir):
Flow.load_config(
os.path.join(cur_dir, '../../../yaml/test_flow_visualization.yml')
).plot(output=os.path.join(tmpdir, 'flow.svg'))
assert os.path.exists(os.path.join(tmpdir, 'flow.svg'))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
def test_visualization_with_yml_file_jpg(tmpdir):
Flow.load_config(
os.path.join(cur_dir, '../../../yaml/test_flow_visualization.yml')
).plot(output=os.path.join(tmpdir, 'flow.jpg'))
assert os.path.exists(os.path.join(tmpdir, 'flow.jpg'))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
def test_visualization_with_yml_file_jpg_lr(tmpdir):
Flow.load_config(
os.path.join(cur_dir, '../../../yaml/test_flow_visualization.yml')
).plot(output=os.path.join(tmpdir, 'flow-hor.jpg'), vertical_layout=False)
assert os.path.exists(os.path.join(tmpdir, 'flow-hor.jpg'))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
def test_visualization_plot_twice(tmpdir):
(
Flow()
.add(name='pod_a')
.plot(output=os.path.join(tmpdir, 'flow1.svg'))
.add(name='pod_b', needs='gateway')
.needs(['pod_a', 'pod_b'])
.plot(output=os.path.join(tmpdir, 'flow2.svg'))
)
assert os.path.exists(os.path.join(tmpdir, 'flow1.svg'))
assert os.path.exists(os.path.join(tmpdir, 'flow2.svg'))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
def test_visualization_plot_in_middle(tmpdir):
(
Flow()
.add(name='pod_a')
.plot(output=os.path.join(tmpdir, 'flow3.svg'))
.add(name='pod_b', needs='gateway')
.needs(['pod_a', 'pod_b'])
)
assert os.path.exists(os.path.join(tmpdir, 'flow3.svg'))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
def test_flow_before_after_plot(tmpdir):
Flow().add(uses_before=Executor, uses_after=Executor, name='p1').plot(
os.path.join(tmpdir, 'flow.svg')
)
assert os.path.exists(os.path.join(tmpdir, 'flow.svg'))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
def test_flow_before_plot(tmpdir):
Flow().add(uses_before=Executor, name='p1').plot(os.path.join(tmpdir, 'flow.svg'))
assert os.path.exists(os.path.join(tmpdir, 'flow.svg'))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
def test_flow_after_plot(tmpdir):
Flow().add(uses_after=Executor, name='p1').plot(os.path.join(tmpdir, 'flow.svg'))
assert os.path.exists(os.path.join(tmpdir, 'flow.svg'))
@pytest.mark.skipif("GITHUB_WORKFLOW" in os.environ, reason="Skip unneeded")
@pytest.mark.parametrize('vertical_layout', [True, False])
def test_flow_vertical(tmpdir, vertical_layout):
def get_image_size(fname):
with open(fname, 'rb') as fh:
head = fh.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0D0A1A0A:
return
width, height = struct.unpack('>ii', head[16:24])
elif imghdr.what(fname) == 'jpeg':
try:
fh.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xC0 <= ftype <= 0xCF:
fh.seek(size, 1)
byte = fh.read(1)
while ord(byte) == 0xFF:
byte = fh.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fh.read(2))[0] - 2
# We are at a SOFn block
fh.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fh.read(4))
except Exception: # IGNORE:W0703
return
else:
return
return width, height
output_fn = str(tmpdir / 'flow.png')
Flow().add(name='a').add(name='b').plot(output_fn, vertical_layout=vertical_layout)
assert os.path.exists(output_fn)
w_h = get_image_size(output_fn)
assert w_h is not None
w, h = w_h
assert (w < h) == vertical_layout
|
import imghdr
import os
import struct
import pytest
from jina import Executor, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_visualization_with_yml_file_img(tmpdir):
Flow.load_config(
os.path.join(cur_dir, '../../../yaml/test_flow_visualization.yml')
).plot(output=os.path.join(tmpdir, 'flow.svg'))
assert os.path.exists(os.path.join(tmpdir, 'flow.svg'))
def test_visualization_with_yml_file_jpg(tmpdir):
Flow.load_config(
os.path.join(cur_dir, '../../../yaml/test_flow_visualization.yml')
).plot(output=os.path.join(tmpdir, 'flow.jpg'))
assert os.path.exists(os.path.join(tmpdir, 'flow.jpg'))
def test_visualization_with_yml_file_jpg_lr(tmpdir):
Flow.load_config(
os.path.join(cur_dir, '../../../yaml/test_flow_visualization.yml')
).plot(output=os.path.join(tmpdir, 'flow-hor.jpg'), vertical_layout=False)
assert os.path.exists(os.path.join(tmpdir, 'flow-hor.jpg'))
def test_visualization_plot_twice(tmpdir):
(
Flow()
.add(name='pod_a')
.plot(output=os.path.join(tmpdir, 'flow1.svg'))
.add(name='pod_b', needs='gateway')
.needs(['pod_a', 'pod_b'])
.plot(output=os.path.join(tmpdir, 'flow2.svg'))
)
assert os.path.exists(os.path.join(tmpdir, 'flow1.svg'))
assert os.path.exists(os.path.join(tmpdir, 'flow2.svg'))
def test_visualization_plot_in_middle(tmpdir):
(
Flow()
.add(name='pod_a')
.plot(output=os.path.join(tmpdir, 'flow3.svg'))
.add(name='pod_b', needs='gateway')
.needs(['pod_a', 'pod_b'])
)
assert os.path.exists(os.path.join(tmpdir, 'flow3.svg'))
def test_flow_before_after_plot(tmpdir):
Flow().add(uses_before=Executor, uses_after=Executor, name='p1').plot(
os.path.join(tmpdir, 'flow.svg')
)
assert os.path.exists(os.path.join(tmpdir, 'flow.svg'))
def test_flow_before_plot(tmpdir):
Flow().add(uses_before=Executor, name='p1').plot(os.path.join(tmpdir, 'flow.svg'))
assert os.path.exists(os.path.join(tmpdir, 'flow.svg'))
def test_flow_after_plot(tmpdir):
Flow().add(uses_after=Executor, name='p1').plot(os.path.join(tmpdir, 'flow.svg'))
assert os.path.exists(os.path.join(tmpdir, 'flow.svg'))
@pytest.mark.parametrize('vertical_layout', [True, False])
def test_flow_vertical(tmpdir, vertical_layout):
def get_image_size(fname):
with open(fname, 'rb') as fh:
head = fh.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0D0A1A0A:
return
width, height = struct.unpack('>ii', head[16:24])
elif imghdr.what(fname) == 'jpeg':
try:
fh.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xC0 <= ftype <= 0xCF:
fh.seek(size, 1)
byte = fh.read(1)
while ord(byte) == 0xFF:
byte = fh.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fh.read(2))[0] - 2
# We are at a SOFn block
fh.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fh.read(4))
except Exception: # IGNORE:W0703
return
else:
return
return width, height
output_fn = str(tmpdir / 'flow.png')
Flow().add(name='a').add(name='b').plot(output_fn, vertical_layout=vertical_layout)
assert os.path.exists(output_fn)
w_h = get_image_size(output_fn)
assert w_h is not None
w, h = w_h
assert (w < h) == vertical_layout
def test_flow_plot_after_build():
f = Flow().add().add()
with f:
f.plot()
f.plot()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes',
'evaluateImgLists'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image shape on CrowdHuman dataset.
Here is an example to run this script.
Example:
python tools/misc/get_crowdhuman_id_hw.py ${CONFIG} \
--dataset ${DATASET_TYPE}
"""
import argparse
import json
import logging
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import dump, get, get_text
from mmengine.logging import print_log
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
choices=['train', 'val'],
help='Collect image metas from which dataset')
parser.add_argument(
'--nproc',
default=10,
type=int,
help='Processes used to get image metas')
args = parser.parse_args()
return args
def get_image_metas(anno_str, img_prefix):
id_hw = {}
anno_dict = json.loads(anno_str)
img_path = osp.join(img_prefix, f"{anno_dict['ID']}.jpg")
img_id = anno_dict['ID']
img_bytes = get(img_path)
img = mmcv.imfrombytes(img_bytes, backend='cv2')
id_hw[img_id] = img.shape[:2]
return id_hw
def main():
args = parse_args()
# get ann_file and img_prefix from config files
cfg = Config.fromfile(args.config)
dataset = args.dataset
dataloader_cfg = cfg.get(f'{dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
# load image metas
print_log(
f'loading CrowdHuman {dataset} annotation...', level=logging.INFO)
anno_strs = get_text(ann_file).strip().split('\n')
pool = Pool(args.nproc)
# get image metas with multiple processes
id_hw_temp = pool.starmap(
get_image_metas,
zip(anno_strs, [img_prefix for _ in range(len(anno_strs))]),
)
pool.close()
# save image metas
id_hw = {}
for sub_dict in id_hw_temp:
id_hw.update(sub_dict)
data_root = osp.dirname(ann_file)
save_path = osp.join(data_root, f'id_hw_{dataset}.json')
print_log(
f'\nsaving "id_hw_{dataset}.json" in "{data_root}"',
level=logging.INFO)
dump(id_hw, save_path, file_format='json')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image shape on CrowdHuman dataset.
Here is an example to run this script.
Example:
python tools/misc/get_crowdhuman_id_hw.py ${CONFIG} \
--dataset ${DATASET_TYPE}
"""
import argparse
import json
import logging
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import FileClient, dump
from mmengine.logging import print_log
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
choices=['train', 'val'],
help='Collect image metas from which dataset')
parser.add_argument(
'--nproc',
default=10,
type=int,
help='Processes used to get image metas')
args = parser.parse_args()
return args
def get_image_metas(anno_str, img_prefix):
id_hw = {}
file_client = FileClient(backend='disk')
anno_dict = json.loads(anno_str)
img_path = osp.join(img_prefix, f"{anno_dict['ID']}.jpg")
img_id = anno_dict['ID']
img_bytes = file_client.get(img_path)
img = mmcv.imfrombytes(img_bytes, backend='cv2')
id_hw[img_id] = img.shape[:2]
return id_hw
def main():
args = parse_args()
# get ann_file and img_prefix from config files
cfg = Config.fromfile(args.config)
file_client_args = cfg.get('file_client_args', dict(backend='disk'))
file_client = FileClient(**file_client_args)
dataset = args.dataset
dataloader_cfg = cfg.get(f'{dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
# load image metas
print_log(
f'loading CrowdHuman {dataset} annotation...', level=logging.INFO)
anno_strs = file_client.get_text(ann_file).strip().split('\n')
pool = Pool(args.nproc)
# get image metas with multiple processes
id_hw_temp = pool.starmap(
get_image_metas,
zip(anno_strs, [img_prefix for _ in range(len(anno_strs))]),
)
pool.close()
# save image metas
id_hw = {}
for sub_dict in id_hw_temp:
id_hw.update(sub_dict)
data_root = osp.dirname(ann_file)
save_path = osp.join(data_root, f'id_hw_{dataset}.json')
print_log(
f'\nsaving "id_hw_{dataset}.json" in "{data_root}"',
level=logging.INFO)
dump(id_hw, save_path, file_format='json')
if __name__ == '__main__':
main()
|
_base_ = './fovea_r50_fpn_4xb4-1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
from ._stereo_matching import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
from .caltech import Caltech101, Caltech256
from .celeba import CelebA
from .cifar import CIFAR10, CIFAR100
from .cityscapes import Cityscapes
from .clevr import CLEVRClassification
from .coco import CocoCaptions, CocoDetection
from .country211 import Country211
from .dtd import DTD
from .eurosat import EuroSAT
from .fakedata import FakeData
from .fer2013 import FER2013
from .fgvc_aircraft import FGVCAircraft
from .flickr import Flickr30k, Flickr8k
from .flowers102 import Flowers102
from .folder import DatasetFolder, ImageFolder
from .food101 import Food101
from .gtsrb import GTSRB
from .hmdb51 import HMDB51
from .imagenet import ImageNet
from .inaturalist import INaturalist
from .kinetics import Kinetics
from .kitti import Kitti
from .lfw import LFWPairs, LFWPeople
from .lsun import LSUN, LSUNClass
from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
from .moving_mnist import MovingMNIST
from .omniglot import Omniglot
from .oxford_iiit_pet import OxfordIIITPet
from .pcam import PCAM
from .phototour import PhotoTour
from .places365 import Places365
from .rendered_sst2 import RenderedSST2
from .sbd import SBDataset
from .sbu import SBU
from .semeion import SEMEION
from .stanford_cars import StanfordCars
from .stl10 import STL10
from .sun397 import SUN397
from .svhn import SVHN
from .ucf101 import UCF101
from .usps import USPS
from .vision import VisionDataset
from .voc import VOCDetection, VOCSegmentation
from .widerface import WIDERFace
__all__ = (
"LSUN",
"LSUNClass",
"ImageFolder",
"DatasetFolder",
"FakeData",
"CocoCaptions",
"CocoDetection",
"CIFAR10",
"CIFAR100",
"EMNIST",
"FashionMNIST",
"QMNIST",
"MNIST",
"KMNIST",
"StanfordCars",
"STL10",
"SUN397",
"SVHN",
"PhotoTour",
"SEMEION",
"Omniglot",
"SBU",
"Flickr8k",
"Flickr30k",
"Flowers102",
"VOCSegmentation",
"VOCDetection",
"Cityscapes",
"ImageNet",
"Caltech101",
"Caltech256",
"CelebA",
"WIDERFace",
"SBDataset",
"VisionDataset",
"USPS",
"Kinetics",
"HMDB51",
"UCF101",
"Places365",
"Kitti",
"INaturalist",
"LFWPeople",
"LFWPairs",
"KittiFlow",
"Sintel",
"FlyingChairs",
"FlyingThings3D",
"HD1K",
"Food101",
"DTD",
"FER2013",
"GTSRB",
"CLEVRClassification",
"OxfordIIITPet",
"PCAM",
"Country211",
"FGVCAircraft",
"EuroSAT",
"RenderedSST2",
"Kitti2012Stereo",
"Kitti2015Stereo",
"CarlaStereo",
"Middlebury2014Stereo",
"CREStereo",
"FallingThingsStereo",
"SceneFlowStereo",
"SintelStereo",
"InStereo2k",
"ETH3DStereo",
"wrap_dataset_for_transforms_v2",
)
# We override current module's attributes to handle the import:
# from torchvision.datasets import wrap_dataset_for_transforms_v2
# without a cyclic error.
# Ref: https://peps.python.org/pep-0562/
def __getattr__(name):
if name in ("wrap_dataset_for_transforms_v2",):
from torchvision.tv_tensors._dataset_wrapper import wrap_dataset_for_transforms_v2
return wrap_dataset_for_transforms_v2
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
from ._stereo_matching import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
from .caltech import Caltech101, Caltech256
from .celeba import CelebA
from .cifar import CIFAR10, CIFAR100
from .cityscapes import Cityscapes
from .clevr import CLEVRClassification
from .coco import CocoCaptions, CocoDetection
from .country211 import Country211
from .dtd import DTD
from .eurosat import EuroSAT
from .fakedata import FakeData
from .fer2013 import FER2013
from .fgvc_aircraft import FGVCAircraft
from .flickr import Flickr30k, Flickr8k
from .flowers102 import Flowers102
from .folder import DatasetFolder, ImageFolder
from .food101 import Food101
from .gtsrb import GTSRB
from .hmdb51 import HMDB51
from .imagenet import ImageNet
from .inaturalist import INaturalist
from .kinetics import Kinetics
from .kitti import Kitti
from .lfw import LFWPairs, LFWPeople
from .lsun import LSUN, LSUNClass
from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
from .moving_mnist import MovingMNIST
from .omniglot import Omniglot
from .oxford_iiit_pet import OxfordIIITPet
from .pcam import PCAM
from .phototour import PhotoTour
from .places365 import Places365
from .rendered_sst2 import RenderedSST2
from .sbd import SBDataset
from .sbu import SBU
from .semeion import SEMEION
from .stanford_cars import StanfordCars
from .stl10 import STL10
from .sun397 import SUN397
from .svhn import SVHN
from .ucf101 import UCF101
from .usps import USPS
from .vision import VisionDataset
from .voc import VOCDetection, VOCSegmentation
from .widerface import WIDERFace
__all__ = (
"LSUN",
"LSUNClass",
"ImageFolder",
"DatasetFolder",
"FakeData",
"CocoCaptions",
"CocoDetection",
"CIFAR10",
"CIFAR100",
"EMNIST",
"FashionMNIST",
"QMNIST",
"MNIST",
"KMNIST",
"StanfordCars",
"STL10",
"SUN397",
"SVHN",
"PhotoTour",
"SEMEION",
"Omniglot",
"SBU",
"Flickr8k",
"Flickr30k",
"Flowers102",
"VOCSegmentation",
"VOCDetection",
"Cityscapes",
"ImageNet",
"Caltech101",
"Caltech256",
"CelebA",
"WIDERFace",
"SBDataset",
"VisionDataset",
"USPS",
"Kinetics",
"HMDB51",
"UCF101",
"Places365",
"Kitti",
"INaturalist",
"LFWPeople",
"LFWPairs",
"KittiFlow",
"Sintel",
"FlyingChairs",
"FlyingThings3D",
"HD1K",
"Food101",
"DTD",
"FER2013",
"GTSRB",
"CLEVRClassification",
"OxfordIIITPet",
"PCAM",
"Country211",
"FGVCAircraft",
"EuroSAT",
"RenderedSST2",
"Kitti2012Stereo",
"Kitti2015Stereo",
"CarlaStereo",
"Middlebury2014Stereo",
"CREStereo",
"FallingThingsStereo",
"SceneFlowStereo",
"SintelStereo",
"InStereo2k",
"ETH3DStereo",
"wrap_dataset_for_transforms_v2",
)
# We override current module's attributes to handle the import:
# from torchvision.datasets import wrap_dataset_for_transforms_v2
# without a cyclic error.
# Ref: https://peps.python.org/pep-0562/
def __getattr__(name):
if name in ("wrap_dataset_for_transforms_v2",):
from torchvision.datapoints._dataset_wrapper import wrap_dataset_for_transforms_v2
return wrap_dataset_for_transforms_v2
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'target',
type=str,
choices=['flow', 'executor', 'gateway'],
help='The target type to ping. For `executor` and `gateway`, checks the readiness of the individual service. '
'For `flow` it checks the connectivity of the complete microservice architecture.',
default='executor',
)
parser.add_argument(
'host',
type=str,
help='The host address with port of a target Executor, Gateway or a Flow, e.g. 0.0.0.0:8000. For Flow or Gateway, host can also indicate the protocol; grpc will be used if not provided, e.g. http://0.0.0.0:8000',
)
parser.add_argument(
'--timeout',
type=int,
default=3000,
help='''
Timeout in milliseconds of one check
-1 for waiting forever
''',
)
parser.add_argument(
'--attempts',
type=int,
default=1,
help='The number of readiness checks to perform',
)
parser.add_argument(
'--min-successful-attempts',
type=int,
default=1,
help='The minimum number of successful readiness checks, before exiting successfully with exit(0)',
)
return parser
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'target',
type=str,
choices=['flow', 'executor', 'gateway'],
help='The target type to ping. For `executor` and `gateway`, checks the readiness of the individual service. '
'For `flow` it checks the connectivity of the complete microservice architecture.',
default='executor',
)
parser.add_argument(
'host',
type=str,
help='The host address with port of a target Executor, Gateway or a Flow, e.g. 0.0.0.0:8000. For Flow or Gateway, host can also indicate the protocol; grpc will be used if not provided, e.g. http://0.0.0.0:8000',
)
parser.add_argument(
'--timeout',
type=int,
default=3000,
help='''
Timeout in milliseconds of one check
-1 for waiting forever
''',
)
parser.add_argument(
'--attempts',
type=int,
default=1,
help='The number of readiness checks to perform',
)
parser.add_argument(
'--min-successful-attempts',
type=int,
default=1,
help='The minimum number of successful readiness checks, before exiting successfully with exit(0)',
)
return parser
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
timesteps: jnp.ndarray,
embedding_dim: int,
freq_shift: float = 1,
min_timescale: float = 1,
max_timescale: float = 1.0e4,
flip_sin_to_cos: bool = False,
scale: float = 1.0,
) -> jnp.ndarray:
"""Returns the positional encoding (same as Tensor2Tensor).
Args:
timesteps (`jnp.ndarray` of shape `(N,)`):
A 1-D array of N indices, one per batch element. These may be fractional.
embedding_dim (`int`):
The number of output channels.
freq_shift (`float`, *optional*, defaults to `1`):
Shift applied to the frequency scaling of the embeddings.
min_timescale (`float`, *optional*, defaults to `1`):
The smallest time unit used in the sinusoidal calculation (should probably be 0.0).
max_timescale (`float`, *optional*, defaults to `1.0e4`):
The largest time unit used in the sinusoidal calculation.
flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
Whether to flip the order of sinusoidal components to cosine first.
scale (`float`, *optional*, defaults to `1.0`):
A scaling factor applied to the positional embeddings.
Returns:
a Tensor of timing signals [N, num_channels]
"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
num_timescales = float(embedding_dim // 2)
log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
# scale embeddings
scaled_time = scale * emb
if flip_sin_to_cos:
signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
else:
signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
return signal
class FlaxTimestepEmbedding(nn.Module):
r"""
Time step Embedding Module. Learns embeddings for input time steps.
Args:
time_embed_dim (`int`, *optional*, defaults to `32`):
Time step embedding dimension.
dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
The data type for the embedding parameters.
"""
time_embed_dim: int = 32
dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__(self, temb):
temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
temb = nn.silu(temb)
temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
return temb
class FlaxTimesteps(nn.Module):
r"""
Wrapper Module for sinusoidal Time step Embeddings as described in https://huggingface.co/papers/2006.11239
Args:
dim (`int`, *optional*, defaults to `32`):
Time step embedding dimension.
flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
Whether to flip the sinusoidal function from sine to cosine.
freq_shift (`float`, *optional*, defaults to `1`):
Frequency shift applied to the sinusoidal embeddings.
"""
dim: int = 32
flip_sin_to_cos: bool = False
freq_shift: float = 1
@nn.compact
def __call__(self, timesteps):
return get_sinusoidal_embeddings(
timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
timesteps: jnp.ndarray,
embedding_dim: int,
freq_shift: float = 1,
min_timescale: float = 1,
max_timescale: float = 1.0e4,
flip_sin_to_cos: bool = False,
scale: float = 1.0,
) -> jnp.ndarray:
"""Returns the positional encoding (same as Tensor2Tensor).
Args:
timesteps (`jnp.ndarray` of shape `(N,)`):
A 1-D array of N indices, one per batch element. These may be fractional.
embedding_dim (`int`):
The number of output channels.
freq_shift (`float`, *optional*, defaults to `1`):
Shift applied to the frequency scaling of the embeddings.
min_timescale (`float`, *optional*, defaults to `1`):
The smallest time unit used in the sinusoidal calculation (should probably be 0.0).
max_timescale (`float`, *optional*, defaults to `1.0e4`):
The largest time unit used in the sinusoidal calculation.
flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
Whether to flip the order of sinusoidal components to cosine first.
scale (`float`, *optional*, defaults to `1.0`):
A scaling factor applied to the positional embeddings.
Returns:
a Tensor of timing signals [N, num_channels]
"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
num_timescales = float(embedding_dim // 2)
log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
# scale embeddings
scaled_time = scale * emb
if flip_sin_to_cos:
signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
else:
signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
return signal
class FlaxTimestepEmbedding(nn.Module):
r"""
Time step Embedding Module. Learns embeddings for input time steps.
Args:
time_embed_dim (`int`, *optional*, defaults to `32`):
Time step embedding dimension.
dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
The data type for the embedding parameters.
"""
time_embed_dim: int = 32
dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__(self, temb):
temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
temb = nn.silu(temb)
temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
return temb
class FlaxTimesteps(nn.Module):
r"""
Wrapper Module for sinusoidal Time step Embeddings as described in https://huggingface.co/papers/2006.11239
Args:
dim (`int`, *optional*, defaults to `32`):
Time step embedding dimension.
flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
Whether to flip the sinusoidal function from sine to cosine.
freq_shift (`float`, *optional*, defaults to `1`):
Frequency shift applied to the sinusoidal embeddings.
"""
dim: int = 32
flip_sin_to_cos: bool = False
freq_shift: float = 1
@nn.compact
def __call__(self, timesteps):
return get_sinusoidal_embeddings(
timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
help='If a checkpoint path is specified, resume from it; if not, '
'try to auto resume from the latest checkpoint '
'in the work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
help='If specify checkpoint path, resume from it, while if not '
'specify, try to auto resume from the latest checkpoint '
'in the work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
            raise RuntimeError('Cannot find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
from typing import Any, List, Optional
from gigachat import GigaChat # Install GigaChat API library via 'pip install gigachat'
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
class GigaChatEmbedding(BaseEmbedding):
"""
GigaChat encoder class for generating embeddings.
Attributes:
_client (Optional[GigaChat]): Instance of the GigaChat client.
type (str): Type identifier for the encoder, which is "gigachat".
Example:
.. code-block:: python
from llama_index.embeddings.gigachat import GigaChatEmbeddings
embeddings = GigaChatEmbeddings(
credentials=..., scope=..., verify_ssl_certs=False
)
"""
_client: Optional[GigaChat] = PrivateAttr()
type: str = "gigachat"
def __init__(
self,
name: Optional[str] = "Embeddings",
auth_data: Optional[str] = None,
scope: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
auth_data = get_from_param_or_env(
"auth_data", auth_data, "GIGACHAT_AUTH_DATA", ""
)
if not auth_data:
raise ValueError(
"You must provide an AUTH DATA to use GigaChat. "
"You can either pass it in as an argument or set it `GIGACHAT_AUTH_DATA`."
)
if scope is None:
raise ValueError(
"""
GigaChat scope cannot be 'None'.
Set 'GIGACHAT_API_PERS' for personal use or 'GIGACHAT_API_CORP' for corporate use.
"""
)
super().__init__(
model_name=name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
try:
self._client = GigaChat(
scope=scope, credentials=auth_data, verify_ssl_certs=False
)
except Exception as e:
raise ValueError(f"GigaChat client failed to initialize. Error: {e}") from e
@classmethod
def class_name(cls) -> str:
"""Return the class name."""
return "GigaChatEmbedding"
def _get_query_embeddings(self, queries: List[str]) -> List[List[float]]:
"""
        Synchronously embed documents using a GigaChat embeddings model.
Args:
queries: The list of documents to embed.
Returns:
List of embeddings, one for each document.
"""
embeddings = self._client.embeddings(queries).data
return [embeds_obj.embedding for embeds_obj in embeddings]
async def _aget_query_embeddings(self, queries: List[str]) -> List[List[float]]:
"""
Asynchronously embed documents using a GigaChat embeddings model.
Args:
queries: The list of documents to embed.
Returns:
List of embeddings, one for each document.
"""
embeddings = (await self._client.aembeddings(queries)).data
return [embeds_obj.embedding for embeds_obj in embeddings]
def _get_query_embedding(self, query: List[str]) -> List[float]:
"""
Synchronously embed a document using GigaChat embeddings model.
Args:
query: The document to embed.
Returns:
Embeddings for the document.
"""
return self._client.embeddings(query).data[0].embedding
async def _aget_query_embedding(self, query: List[str]) -> List[float]:
"""
Asynchronously embed a query using GigaChat embeddings model.
Args:
query: The document to embed.
Returns:
Embeddings for the document.
"""
return (await self._client.aembeddings(query)).data[0].embedding
def _get_text_embedding(self, text: str) -> List[float]:
"""
Synchronously embed a text using GigaChat embeddings model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._client.embeddings([text]).data[0].embedding
async def _aget_text_embedding(self, text: str) -> List[float]:
"""
Asynchronously embed a text using GigaChat embeddings model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return (await self._client.aembeddings([text])).data[0].embedding
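# Hedged usage sketch: assumes valid GigaChat credentials are available in the
# GIGACHAT_AUTH_DATA environment variable and that network access is allowed;
# the scope value below is illustrative, not taken from this file.
if __name__ == "__main__":
    embedder = GigaChatEmbedding(scope="GIGACHAT_API_PERS")
    # get_text_embedding() is the public BaseEmbedding entry point that wraps
    # the private _get_text_embedding() defined above.
    vector = embedder.get_text_embedding("Hello, world")
    print(len(vector))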
|
from typing import Any, List, Optional
from gigachat import GigaChat # Install GigaChat API library via 'pip install gigachat'
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
class GigaChatEmbedding(BaseEmbedding):
"""
GigaChat encoder class for generating embeddings.
Attributes:
_client (Optional[GigaChat]): Instance of the GigaChat client.
type (str): Type identifier for the encoder, which is "gigachat".
Example:
.. code-block:: python
from llama_index.embeddings.gigachat import GigaChatEmbeddings
embeddings = GigaChatEmbeddings(
credentials=..., scope=..., verify_ssl_certs=False
)
"""
_client: Optional[GigaChat] = PrivateAttr()
type: str = "gigachat"
def __init__(
self,
name: Optional[str] = "Embeddings",
auth_data: Optional[str] = None,
scope: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
auth_data = get_from_param_or_env(
"auth_data", auth_data, "GIGACHAT_AUTH_DATA", ""
)
if not auth_data:
raise ValueError(
"You must provide an AUTH DATA to use GigaChat. "
"You can either pass it in as an argument or set it `GIGACHAT_AUTH_DATA`."
)
if scope is None:
raise ValueError(
"""
GigaChat scope cannot be 'None'.
Set 'GIGACHAT_API_PERS' for personal use or 'GIGACHAT_API_CORP' for corporate use.
"""
)
super().__init__(
model_name=name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
try:
self._client = GigaChat(
scope=scope, credentials=auth_data, verify_ssl_certs=False
)
except Exception as e:
raise ValueError(f"GigaChat client failed to initialize. Error: {e}") from e
@classmethod
def class_name(cls) -> str:
"""Return the class name."""
return "GigaChatEmbedding"
def _get_query_embeddings(self, queries: List[str]) -> List[List[float]]:
"""Synchronously Embed documents using a GigaChat embeddings model.
Args:
queries: The list of documents to embed.
Returns:
List of embeddings, one for each document.
"""
embeddings = self._client.embeddings(queries).data
return [embeds_obj.embedding for embeds_obj in embeddings]
async def _aget_query_embeddings(self, queries: List[str]) -> List[List[float]]:
"""Asynchronously embed documents using a GigaChat embeddings model.
Args:
queries: The list of documents to embed.
Returns:
List of embeddings, one for each document.
"""
embeddings = (await self._client.aembeddings(queries)).data
return [embeds_obj.embedding for embeds_obj in embeddings]
def _get_query_embedding(self, query: List[str]) -> List[float]:
"""Synchronously embed a document using GigaChat embeddings model.
Args:
query: The document to embed.
Returns:
Embeddings for the document.
"""
return self._client.embeddings(query).data[0].embedding
async def _aget_query_embedding(self, query: List[str]) -> List[float]:
"""Asynchronously embed a query using GigaChat embeddings model.
Args:
query: The document to embed.
Returns:
Embeddings for the document.
"""
return (await self._client.aembeddings(query)).data[0].embedding
def _get_text_embedding(self, text: str) -> List[float]:
"""Synchronously embed a text using GigaChat embeddings model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._client.embeddings([text]).data[0].embedding
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously embed a text using GigaChat embeddings model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return (await self._client.aembeddings([text])).data[0].embedding
|
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.image_url import ImageUrl
from docarray.typing.url.text_url import TextUrl
__all__ = ['ImageUrl', 'AnyUrl', 'TextUrl']
|
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.image_url import ImageUrl
__all__ = ['ImageUrl', 'AnyUrl']
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class TogetherLLM(OpenAILike):
"""
Together LLM.
Examples:
`pip install llama-index-llms-together`
```python
from llama_index.llms.together import TogetherLLM
# set api key in env or in llm
# import os
# os.environ["TOGETHER_API_KEY"] = "your api key"
llm = TogetherLLM(
model="mistralai/Mixtral-8x7B-Instruct-v0.1", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.together.xyz/v1",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("TOGETHER_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "TogetherLLM"
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class TogetherLLM(OpenAILike):
"""Together LLM.
Examples:
`pip install llama-index-llms-together`
```python
from llama_index.llms.together import TogetherLLM
# set api key in env or in llm
# import os
# os.environ["TOGETHER_API_KEY"] = "your api key"
llm = TogetherLLM(
model="mistralai/Mixtral-8x7B-Instruct-v0.1", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.together.xyz/v1",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("TOGETHER_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "TogetherLLM"
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def encode(self, bboxes, gt_bboxes):
"""torch.Tensor: return the given ``bboxes``"""
gt_bboxes = get_box_tensor(gt_bboxes)
return gt_bboxes
def decode(self, bboxes, pred_bboxes):
"""torch.Tensor: return the given ``pred_bboxes``"""
if self.use_box_type:
pred_bboxes = HorizontalBoxes(pred_bboxes)
return pred_bboxes
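# Hedged usage sketch: assumes mmdet and torch are installed. The pseudo coder
# is a pass-through: encode() simply returns the ground-truth box tensor and
# decode() returns the predictions unchanged (wrapped only if use_box_type is
# enabled on the base coder).
if __name__ == '__main__':
    import torch
    coder = PseudoBBoxCoder()
    gt_boxes = torch.rand(4, 4)
    pred_boxes = torch.rand(4, 4)
    assert torch.equal(coder.encode(None, gt_boxes), gt_boxes)
    assert torch.equal(coder.decode(None, pred_boxes), pred_boxes)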
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils.misc import get_box_tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def encode(self, bboxes, gt_bboxes):
"""torch.Tensor: return the given ``bboxes``"""
gt_bboxes = get_box_tensor(gt_bboxes)
return gt_bboxes
def decode(self, bboxes, pred_bboxes):
"""torch.Tensor: return the given ``pred_bboxes``"""
if self.use_box_type:
pred_bboxes = HorizontalBoxes(pred_bboxes)
return pred_bboxes
|
from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, Any, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.cloud import texttospeech
def _import_google_cloud_texttospeech() -> Any:
try:
from google.cloud import texttospeech
except ImportError as e:
raise ImportError(
"Cannot import google.cloud.texttospeech, please install "
"`pip install google-cloud-texttospeech`."
) from e
return texttospeech
def _encoding_file_extension_map(encoding: texttospeech.AudioEncoding) -> Optional[str]:
texttospeech = _import_google_cloud_texttospeech()
ENCODING_FILE_EXTENSION_MAP = {
texttospeech.AudioEncoding.LINEAR16: ".wav",
texttospeech.AudioEncoding.MP3: ".mp3",
texttospeech.AudioEncoding.OGG_OPUS: ".ogg",
texttospeech.AudioEncoding.MULAW: ".wav",
texttospeech.AudioEncoding.ALAW: ".wav",
}
return ENCODING_FILE_EXTENSION_MAP.get(encoding)
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.TextToSpeechTool",
)
class GoogleCloudTextToSpeechTool(BaseTool):
"""Tool that queries the Google Cloud Text to Speech API.
In order to set this up, follow instructions at:
https://cloud.google.com/text-to-speech/docs/before-you-begin
"""
name: str = "google_cloud_texttospeech"
description: str = (
"A wrapper around Google Cloud Text-to-Speech. "
"Useful for when you need to synthesize audio from text. "
"It supports multiple languages, including English, German, Polish, "
"Spanish, Italian, French, Portuguese, and Hindi. "
)
_client: Any
def __init__(self, **kwargs: Any) -> None:
"""Initializes private fields."""
texttospeech = _import_google_cloud_texttospeech()
super().__init__(**kwargs)
self._client = texttospeech.TextToSpeechClient(
client_info=get_client_info(module="text-to-speech")
)
def _run(
self,
input_text: str,
language_code: str = "en-US",
ssml_gender: Optional[texttospeech.SsmlVoiceGender] = None,
audio_encoding: Optional[texttospeech.AudioEncoding] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
texttospeech = _import_google_cloud_texttospeech()
ssml_gender = ssml_gender or texttospeech.SsmlVoiceGender.NEUTRAL
audio_encoding = audio_encoding or texttospeech.AudioEncoding.MP3
response = self._client.synthesize_speech(
input=texttospeech.SynthesisInput(text=input_text),
voice=texttospeech.VoiceSelectionParams(
language_code=language_code, ssml_gender=ssml_gender
),
audio_config=texttospeech.AudioConfig(audio_encoding=audio_encoding),
)
suffix = _encoding_file_extension_map(audio_encoding)
with tempfile.NamedTemporaryFile(mode="bx", suffix=suffix, delete=False) as f:
f.write(response.audio_content)
return f.name
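# Hedged usage sketch: assumes Google Cloud application-default credentials and
# the google-cloud-texttospeech package are available; the input text below is
# illustrative only. run() returns the path of the temporary audio file written
# by _run().
if __name__ == "__main__":
    tts_tool = GoogleCloudTextToSpeechTool()
    audio_path = tts_tool.run("Hello from the text-to-speech tool")
    print(f"Synthesized audio written to {audio_path}")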
|
from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, Any, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.cloud import texttospeech # type: ignore[attr-defined]
def _import_google_cloud_texttospeech() -> Any:
try:
from google.cloud import texttospeech # type: ignore[attr-defined]
except ImportError as e:
raise ImportError(
"Cannot import google.cloud.texttospeech, please install "
"`pip install google-cloud-texttospeech`."
) from e
return texttospeech
def _encoding_file_extension_map(encoding: texttospeech.AudioEncoding) -> Optional[str]:
texttospeech = _import_google_cloud_texttospeech()
ENCODING_FILE_EXTENSION_MAP = {
texttospeech.AudioEncoding.LINEAR16: ".wav",
texttospeech.AudioEncoding.MP3: ".mp3",
texttospeech.AudioEncoding.OGG_OPUS: ".ogg",
texttospeech.AudioEncoding.MULAW: ".wav",
texttospeech.AudioEncoding.ALAW: ".wav",
}
return ENCODING_FILE_EXTENSION_MAP.get(encoding)
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.TextToSpeechTool",
)
class GoogleCloudTextToSpeechTool(BaseTool): # type: ignore[override]
"""Tool that queries the Google Cloud Text to Speech API.
In order to set this up, follow instructions at:
https://cloud.google.com/text-to-speech/docs/before-you-begin
"""
name: str = "google_cloud_texttospeech"
description: str = (
"A wrapper around Google Cloud Text-to-Speech. "
"Useful for when you need to synthesize audio from text. "
"It supports multiple languages, including English, German, Polish, "
"Spanish, Italian, French, Portuguese, and Hindi. "
)
_client: Any
def __init__(self, **kwargs: Any) -> None:
"""Initializes private fields."""
texttospeech = _import_google_cloud_texttospeech()
super().__init__(**kwargs)
self._client = texttospeech.TextToSpeechClient(
client_info=get_client_info(module="text-to-speech")
)
def _run(
self,
input_text: str,
language_code: str = "en-US",
ssml_gender: Optional[texttospeech.SsmlVoiceGender] = None,
audio_encoding: Optional[texttospeech.AudioEncoding] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
texttospeech = _import_google_cloud_texttospeech()
ssml_gender = ssml_gender or texttospeech.SsmlVoiceGender.NEUTRAL
audio_encoding = audio_encoding or texttospeech.AudioEncoding.MP3
response = self._client.synthesize_speech(
input=texttospeech.SynthesisInput(text=input_text),
voice=texttospeech.VoiceSelectionParams(
language_code=language_code, ssml_gender=ssml_gender
),
audio_config=texttospeech.AudioConfig(audio_encoding=audio_encoding),
)
suffix = _encoding_file_extension_map(audio_encoding)
with tempfile.NamedTemporaryFile(mode="bx", suffix=suffix, delete=False) as f:
f.write(response.audio_content)
return f.name
|
import os
import shutil
from pathlib import Path
import pytest
import numpy as np
import PIL.Image as Image
from jina import DocumentArray, Document, Executor
from ...big_transfer import BigTransferEncoder
directory = os.path.dirname(os.path.realpath(__file__))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_path == 'pretrained'
assert ex.model_name == 'R50x1'
def test_initialization_and_model_download():
shutil.rmtree('pretrained', ignore_errors=True)
# This call will download the model
encoder = BigTransferEncoder()
assert encoder.model_path == 'pretrained'
assert encoder.model_name == 'R50x1'
assert not encoder.on_gpu
assert os.path.exists('pretrained')
assert os.path.exists(os.path.join('pretrained', 'saved_model.pb'))
# This call will use the downloaded model
_ = BigTransferEncoder()
shutil.rmtree('pretrained', ignore_errors=True)
with pytest.raises(AttributeError):
_ = BigTransferEncoder(model_name='model_not_exists')
def test_encoding():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder()
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_preprocessing():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder(target_dim=(256, 256, 3))
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_encoding_default_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder(default_traversal_paths=['c'])
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
def test_encoding_override_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder()
assert encoder.default_traversal_paths == ['r']
encoder.encode(DocumentArray([doc]), parameters={'traversal_paths': ['c']})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
|
import shutil
import pytest
import os
import numpy as np
import PIL.Image as Image
from jina import DocumentArray, Document
from ...big_transfer import BigTransferEncoder
directory = os.path.dirname(os.path.realpath(__file__))
def test_initialization_and_model_download():
shutil.rmtree('pretrained', ignore_errors=True)
# This call will download the model
encoder = BigTransferEncoder()
assert encoder.model_path == 'pretrained'
assert encoder.model_name == 'R50x1'
assert not encoder.on_gpu
assert os.path.exists('pretrained')
assert os.path.exists(os.path.join('pretrained', 'saved_model.pb'))
# This call will use the downloaded model
_ = BigTransferEncoder()
shutil.rmtree('pretrained', ignore_errors=True)
with pytest.raises(AttributeError):
_ = BigTransferEncoder(model_name='model_not_exists')
def test_encoding():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder()
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_preprocessing():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder(target_dim=(256, 256, 3))
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_encoding_default_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder(default_traversal_paths=['c'])
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
def test_encoding_override_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder()
assert encoder.default_traversal_paths == ['r']
encoder.encode(DocumentArray([doc]),
parameters={'traversal_paths': ['c']})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
|
import argparse
import logging
from typing import Optional
import torch
import torchaudio
from torchaudio.prototype.ctc_decoder import lexicon_decoder, download_pretrained_files
logger = logging.getLogger(__name__)
def run_inference(args):
# get pretrained wav2vec2.0 model
bundle = getattr(torchaudio.pipelines, args.model)
model = bundle.get_model()
# get decoder files
files = download_pretrained_files("librispeech-4-gram")
decoder = lexicon_decoder(
lexicon=files.lexicon,
tokens=files.tokens,
lm=files.lm,
nbest=args.nbest,
beam_size=args.beam_size,
beam_size_token=args.beam_size_token,
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_score,
sil_score=args.sil_score,
log_add=False,
)
dataset = torchaudio.datasets.LIBRISPEECH(args.librispeech_path, url=args.split, download=False)
total_edit_distance = 0
total_length = 0
for idx, sample in enumerate(dataset):
waveform, _, transcript, _, _, _ = sample
transcript = transcript.strip().lower().strip()
with torch.inference_mode():
emission, _ = model(waveform)
results = decoder(emission)
total_edit_distance += torchaudio.functional.edit_distance(transcript.split(), results[0][0].words)
total_length += len(transcript.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER: {total_edit_distance / total_length}")
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--librispeech_path",
type=str,
help="folder where LibriSpeech is stored",
)
parser.add_argument(
"--split",
type=str,
help="LibriSpeech dataset split",
choices=["dev-clean", "dev-other", "test-clean", "test-other"],
default="test-other",
)
parser.add_argument(
"--model",
type=str,
default="WAV2VEC2_ASR_BASE_960H",
help="pretrained Wav2Vec2 model from torchaudio.pipelines",
)
parser.add_argument("--nbest", type=int, default=1, help="number of best hypotheses to return")
parser.add_argument(
"--beam-size", type=int, default=500, help="beam size for determining number of hypotheses to store"
)
parser.add_argument(
"--beam-size-token",
        type=int,
default=None,
help="number of tokens to consider at each beam search step",
)
parser.add_argument("--beam-threshold", type=int, default=50, help="beam threshold for pruning hypotheses")
parser.add_argument(
"--lm-weight",
type=float,
default=1.74,
help="languge model weight",
)
parser.add_argument(
"--word-score",
type=float,
default=0.52,
help="word insertion score",
)
parser.add_argument("--unk_score", type=float, default=float("-inf"), help="unknown word insertion score")
parser.add_argument("--sil_score", type=float, default=0, help="silence insertion score")
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def _main():
args = _parse_args()
_init_logger(args.debug)
run_inference(args)
if __name__ == "__main__":
_main()
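# Hedged CLI sketch (the script filename and dataset path below are assumptions,
# not taken from this file):
#   python inference.py --librispeech_path /data/LibriSpeech --split test-other \
#       --model WAV2VEC2_ASR_BASE_960H --lm-weight 1.74 --word-score 0.52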
|
import argparse
import logging
from typing import Optional
import torch
import torchaudio
from torchaudio.prototype.ctc_decoder import lexicon_decoder
logger = logging.getLogger(__name__)
def _download_files(lexicon_file, kenlm_file):
torch.hub.download_url_to_file(
"https://pytorch.s3.amazonaws.com/torchaudio/tutorial-assets/ctc-decoding/lexicon-librispeech.txt", lexicon_file
)
torch.hub.download_url_to_file(
"https://pytorch.s3.amazonaws.com/torchaudio/tutorial-assets/ctc-decoding/4-gram-librispeech.bin", kenlm_file
)
def run_inference(args):
# get pretrained wav2vec2.0 model
bundle = getattr(torchaudio.pipelines, args.model)
model = bundle.get_model()
tokens = [label.lower() for label in bundle.get_labels()]
# get decoder files
hub_dir = torch.hub.get_dir()
lexicon_file = f"{hub_dir}/lexicon.txt"
kenlm_file = f"{hub_dir}/kenlm.bin"
_download_files(lexicon_file, kenlm_file)
decoder = lexicon_decoder(
lexicon=lexicon_file,
tokens=tokens,
lm=kenlm_file,
nbest=args.nbest,
beam_size=args.beam_size,
beam_size_token=args.beam_size_token,
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_score,
sil_score=args.sil_score,
log_add=False,
)
dataset = torchaudio.datasets.LIBRISPEECH(args.librispeech_path, url=args.split, download=False)
total_edit_distance = 0
total_length = 0
for idx, sample in enumerate(dataset):
waveform, _, transcript, _, _, _ = sample
transcript = transcript.strip().lower().strip()
with torch.inference_mode():
emission, _ = model(waveform)
results = decoder(emission)
total_edit_distance += torchaudio.functional.edit_distance(transcript.split(), results[0][0].words)
total_length += len(transcript.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER: {total_edit_distance / total_length}")
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--librispeech_path",
type=str,
help="folder where LibriSpeech is stored",
)
parser.add_argument(
"--split",
type=str,
help="LibriSpeech dataset split",
choices=["dev-clean", "dev-other", "test-clean", "test-other"],
default="test-other",
)
parser.add_argument(
"--model",
type=str,
default="WAV2VEC2_ASR_BASE_960H",
help="pretrained Wav2Vec2 model from torchaudio.pipelines",
)
parser.add_argument("--nbest", type=int, default=1, help="number of best hypotheses to return")
parser.add_argument(
"--beam-size", type=int, default=500, help="beam size for determining number of hypotheses to store"
)
parser.add_argument(
"--beam-size-token",
        type=int,
default=None,
help="number of tokens to consider at each beam search step",
)
parser.add_argument("--beam-threshold", type=int, default=50, help="beam threshold for pruning hypotheses")
parser.add_argument(
"--lm-weight",
type=float,
default=1.74,
help="languge model weight",
)
parser.add_argument(
"--word-score",
type=float,
default=0.52,
help="word insertion score",
)
parser.add_argument("--unk_score", type=float, default=float("-inf"), help="unknown word insertion score")
parser.add_argument("--sil_score", type=float, default=0, help="silence insertion score")
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def _main():
args = _parse_args()
_init_logger(args.debug)
run_inference(args)
if __name__ == "__main__":
_main()
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
"""
Hugging Face file reader.
A parser for HF files.
"""
import json
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, List
import pandas as pd
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class HuggingFaceFSReader(BaseReader):
"""
Hugging Face File System reader.
Uses the new Filesystem API from the Hugging Face Hub client library.
"""
def __init__(self) -> None:
from huggingface_hub import HfFileSystem
self.fs = HfFileSystem()
def load_dicts(self, path: str) -> List[Dict]:
"""Parse file."""
test_data = self.fs.read_bytes(path)
path = Path(path)
if ".gz" in path.suffixes:
import gzip
with TemporaryDirectory() as tmp:
tmp = Path(tmp)
with open(tmp / "tmp.jsonl.gz", "wb") as fp:
fp.write(test_data)
f = gzip.open(tmp / "tmp.jsonl.gz", "rb")
raw = f.read()
data = raw.decode()
else:
data = test_data.decode()
text_lines = data.split("\n")
json_dicts = []
for t in text_lines:
try:
json_dict = json.loads(t)
except json.decoder.JSONDecodeError:
continue
json_dicts.append(json_dict)
return json_dicts
def load_df(self, path: str) -> pd.DataFrame:
"""Load pandas dataframe."""
return pd.DataFrame(self.load_dicts(path))
def load_data(self, path: str) -> List[Document]:
"""Load data."""
json_dicts = self.load_dicts(path)
docs = []
for d in json_dicts:
docs.append(Document(text=str(d)))
return docs
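# Hedged usage sketch: assumes huggingface_hub is installed and that the Hub
# path below exists (it is a placeholder, not taken from this file). load_data()
# parses each JSON line into a Document.
if __name__ == "__main__":
    reader = HuggingFaceFSReader()
    documents = reader.load_data("datasets/some-user/some-dataset/data.jsonl")
    print(len(documents))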
|
"""Hugging Face file reader.
A parser for HF files.
"""
import json
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, List
import pandas as pd
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class HuggingFaceFSReader(BaseReader):
"""Hugging Face File System reader.
Uses the new Filesystem API from the Hugging Face Hub client library.
"""
def __init__(self) -> None:
from huggingface_hub import HfFileSystem
self.fs = HfFileSystem()
def load_dicts(self, path: str) -> List[Dict]:
"""Parse file."""
test_data = self.fs.read_bytes(path)
path = Path(path)
if ".gz" in path.suffixes:
import gzip
with TemporaryDirectory() as tmp:
tmp = Path(tmp)
with open(tmp / "tmp.jsonl.gz", "wb") as fp:
fp.write(test_data)
f = gzip.open(tmp / "tmp.jsonl.gz", "rb")
raw = f.read()
data = raw.decode()
else:
data = test_data.decode()
text_lines = data.split("\n")
json_dicts = []
for t in text_lines:
try:
json_dict = json.loads(t)
except json.decoder.JSONDecodeError:
continue
json_dicts.append(json_dict)
return json_dicts
def load_df(self, path: str) -> pd.DataFrame:
"""Load pandas dataframe."""
return pd.DataFrame(self.load_dicts(path))
def load_data(self, path: str) -> List[Document]:
"""Load data."""
json_dicts = self.load_dicts(path)
docs = []
for d in json_dicts:
docs.append(Document(text=str(d)))
return docs
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RPN(SingleStageDetector):
"""Implementation of Region Proposal Network.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
rpn_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
super(SingleStageDetector, self).__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
self.neck = MODELS.build(neck) if neck is not None else None
rpn_train_cfg = train_cfg['rpn'] if train_cfg is not None else None
rpn_head_num_classes = rpn_head.get('num_classes', 1)
if rpn_head_num_classes != 1:
            warnings.warn('The `num_classes` should be 1 in RPN, but got '
f'{rpn_head_num_classes}, please set '
'rpn_head.num_classes = 1 in your config file.')
rpn_head.update(num_classes=1)
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg['rpn'])
self.bbox_head = MODELS.build(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
# set cat_id of gt_labels to 0 in RPN
rpn_data_samples = copy.deepcopy(batch_data_samples)
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
losses = self.bbox_head.loss(x, rpn_data_samples)
return losses
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
import torch
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RPN(SingleStageDetector):
"""Implementation of Region Proposal Network.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
rpn_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
super(SingleStageDetector, self).__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
self.neck = MODELS.build(neck) if neck is not None else None
rpn_train_cfg = train_cfg['rpn'] if train_cfg is not None else None
rpn_head_num_classes = rpn_head.get('num_classes', 1)
if rpn_head_num_classes != 1:
            warnings.warn('The `num_classes` should be 1 in RPN, but got '
f'{rpn_head_num_classes}, please set '
'rpn_head.num_classes = 1 in your config file.')
rpn_head.update(num_classes=1)
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg['rpn'])
self.bbox_head = MODELS.build(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
# set cat_id of gt_labels to 0 in RPN
rpn_data_samples = copy.deepcopy(batch_data_samples)
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
losses = self.bbox_head.loss(x, rpn_data_samples)
return losses
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
AIML_API = "aiml_api"
ANTHROPIC = "anthropic"
APOLLO = "apollo"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GENERIC_WEBHOOK = "generic_webhook"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
LLAMA_API = "llama_api"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
SMARTLEAD = "smartlead"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
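# Hedged usage sketch (standard library only): because ProviderName subclasses
# str, members compare equal to their raw string values, which is how provider
# identifiers round-trip through JSON configs and database fields.
if __name__ == "__main__":
    assert ProviderName("github") is ProviderName.GITHUB
    assert ProviderName.GITHUB == "github"
    print([provider.value for provider in ProviderName][:5])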
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
APOLLO = "apollo"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GENERIC_WEBHOOK = "generic_webhook"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
LLAMA_API = "llama_api"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
SMARTLEAD = "smartlead"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
|
# NOTE:
# The entire `torchaudio.backend` module is deprecated.
# New things should be added to `torchaudio._backend`.
# Only things related to backward compatibility should be placed here.
def __getattr__(name: str):
if name == "common":
from . import _common
return _common
if name in ["no_backend", "sox_io_backend", "soundfile_backend"]:
import warnings
warnings.warn(
"Torchaudio's I/O functions now support par-call bakcend dispatch. "
"Importing backend implementation directly is no longer guaranteed to work. "
"Please use `backend` keyword with load/save/info function, instead of "
"calling the udnerlying implementation directly.",
stacklevel=2,
)
if name == "sox_io_backend":
from . import _sox_io_backend
return _sox_io_backend
if name == "soundfile_backend":
from torchaudio._backend import soundfile_backend
return soundfile_backend
if name == "no_backend":
from . import _no_backend
return _no_backend
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
# NOTE:
# The entire `torchaudio.backend` module is deprecated.
# New things should be added to `torchaudio._backend`.
# Only things related to backward compatibility should be placed here.
from .utils import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
__all__ = ["_init_backend", "get_audio_backend", "list_audio_backends", "set_audio_backend"]
def __getattr__(name: str):
if name == "common":
from . import _common
return _common
if name in ["no_backend", "sox_io_backend", "soundfile_backend"]:
import warnings
warnings.warn(
"Torchaudio's I/O functions now support par-call bakcend dispatch. "
"Importing backend implementation directly is no longer guaranteed to work. "
"Please use `backend` keyword with load/save/info function, instead of "
"calling the udnerlying implementation directly.",
stacklevel=2,
)
if name == "sox_io_backend":
from . import _sox_io_backend
return _sox_io_backend
if name == "soundfile_backend":
from torchaudio._backend import soundfile_backend
return soundfile_backend
if name == "no_backend":
from . import _no_backend
return _no_backend
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py'
# training schedule for 90k
max_iters = 90000
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[81000, 85500, 87750],
gamma=0.1)
]
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from torch import hub
from pytest_mock import MockerFixture
from ...torch_encoder import ImageTorchEncoder
def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None:
os.environ['TORCH_HOME'] = str(tmpdir)
spy = mocker.spy(hub, 'urlopen')
_ = ImageTorchEncoder(model_name='mobilenet_v2')
assert os.path.isfile(os.path.join(tmpdir, 'hub', 'checkpoints', 'mobilenet_v2-b0353104.pth'))
assert spy.call_count == 1
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from torch import hub
from pytest_mock import MockerFixture
try:
from torch_encoder import ImageTorchEncoder
except:
from jinahub.image.encoder.torch_encoder import ImageTorchEncoder
def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None:
os.environ['TORCH_HOME'] = str(tmpdir)
spy = mocker.spy(hub, 'urlopen')
_ = ImageTorchEncoder(model_name='mobilenet_v2')
assert os.path.isfile(os.path.join(tmpdir, 'hub', 'checkpoints', 'mobilenet_v2-b0353104.pth'))
assert spy.call_count == 1
|
from ._transforms import (
Spectrogram,
InverseSpectrogram,
GriffinLim,
AmplitudeToDB,
MelScale,
InverseMelScale,
MelSpectrogram,
MFCC,
LFCC,
MuLawEncoding,
MuLawDecoding,
Resample,
TimeStretch,
Fade,
FrequencyMasking,
TimeMasking,
SlidingWindowCmn,
Vad,
SpectralCentroid,
Vol,
ComputeDeltas,
PitchShift,
RNNTLoss,
PSD,
MVDR,
RTFMVDR,
SoudenMVDR,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
from ._transforms import (
Spectrogram,
InverseSpectrogram,
GriffinLim,
AmplitudeToDB,
MelScale,
InverseMelScale,
MelSpectrogram,
MFCC,
LFCC,
MuLawEncoding,
MuLawDecoding,
Resample,
TimeStretch,
Fade,
FrequencyMasking,
TimeMasking,
SlidingWindowCmn,
Vad,
SpectralCentroid,
Vol,
ComputeDeltas,
PitchShift,
RNNTLoss,
PSD,
MVDR,
SoudenMVDR,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user(alt_user: bool = False) -> User:
if alt_user:
test_user_data = {
"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
"email": "[email protected]",
"name": "Test User 2",
}
else:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "[email protected]",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={
"name": "input_2",
"description": "This is my description of this parameter",
},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph description",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
graph_id=test_graph.id,
user_id=test_user.id,
node_input=input_data,
)
print(response)
result = await wait_execution(
test_user.id, test_graph.id, response.graph_exec_id, 10
)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user(alt_user: bool = False) -> User:
if alt_user:
test_user_data = {
"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
"email": "[email protected]",
"name": "Test User 2",
}
else:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "[email protected]",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
graph_id=test_graph.id,
user_id=test_user.id,
node_input=input_data,
)
print(response)
result = await wait_execution(
test_user.id, test_graph.id, response.graph_exec_id, 10
)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.randn(size=(1000, 2)),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='AudioTorchTensor')
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.randn(size=(1000, 2)),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = parse_obj_as(AudioTorchTensor, doc_2.url.load())
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
"""
def to_audio_bytes(self):
import torch
tensor = (self * MAX_INT_16).to(dtype=torch.int16)
return tensor.cpu().detach().numpy().tobytes()
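# Hedged usage sketch: assumes torch and docarray are installed. to_audio_bytes
# rescales float samples to int16, so the resulting byte string holds 2 bytes
# per sample.
if __name__ == '__main__':
    import torch
    from pydantic import parse_obj_as
    audio = parse_obj_as(AudioTorchTensor, torch.zeros(1000, 2))
    assert len(audio.to_audio_bytes()) == audio.numel() * 2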
|
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
|
import os
import pytest
from jina import Document, Flow
from jinahub.indexers.compound.FaissPostgresIndexer import FaissPostgresIndexer
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
# fixes issue #208 https://github.com/jina-ai/executors/issues/208
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_shards_str(docker_compose):
with Flow().load_config(
"""
jtype: Flow
executors:
- name: text_indexer
shards: 1
uses: FaissPostgresIndexer
uses_with:
startup_sync_args:
only_delta: True
total_shards: 1
"""
) as f:
f.search([Document() for _ in range(20)])
|
import os
import pytest
from jina import Document, Flow
from jinahub.indexers.searcher.compound.FaissPostgresIndexer import FaissPostgresIndexer
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
# fixes issue #208 https://github.com/jina-ai/executors/issues/208
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_shards_str(docker_compose):
with Flow().load_config(
"""
jtype: Flow
executors:
- name: text_indexer
shards: 1
uses: FaissPostgresIndexer
uses_with:
startup_sync_args:
only_delta: True
total_shards: 1
"""
) as f:
f.search([Document() for _ in range(20)])
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class LayerNorm(Module):
config_keys: list[str] = ["dimension"]
def __init__(self, dimension: int):
super().__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
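# A small illustrative sketch (an assumption, not part of the original module): the forward
# pass reads "sentence_embedding" from the features dict, normalizes it over the last
# dimension, and writes it back under the same key. The dimension 384 is a hypothetical value.
if __name__ == "__main__":
    import torch

    layer_norm = LayerNorm(dimension=384)
    features = {"sentence_embedding": torch.randn(2, 384)}
    features = layer_norm(features)
    print(features["sentence_embedding"].shape)  # torch.Size([2, 384])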
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super().__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
            # Fallback: incomplete executions have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"Agent": {
"include": {
**AGENT_GRAPH_INCLUDE,
"AgentGraphExecution": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
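# A hedged usage sketch (not part of the original module): how these include mappings might be
# passed to the generated Prisma client. The ``libraryagent`` accessor and the ``userId`` filter
# follow typical prisma-client-py conventions and are assumptions, not confirmed by this file.
async def _example_fetch_library_agents(db: prisma.Prisma, user_id: str):
    return await db.libraryagent.find_many(
        where={"userId": user_id},
        include=library_agent_include(user_id),
    )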
|
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"Agent": {
"include": {
**AGENT_GRAPH_INCLUDE,
"AgentGraphExecution": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D
from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
DiceCost, FocalLossCost, IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .multi_instance_assigner import MultiInstanceAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'BBoxL1Cost', 'ClassificationCost',
'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost', 'IoUCost',
'BboxOverlaps2D', 'DynamicSoftLabelAssigner', 'MultiInstanceAssigner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D
from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
DiceCost, FocalLossCost, IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'BBoxL1Cost', 'ClassificationCost',
'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost', 'IoUCost',
'BboxOverlaps2D', 'DynamicSoftLabelAssigner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
@MODELS.register_module()
class PanopticFPN(TwoStagePanopticSegmentor):
r"""Implementation of `Panoptic feature pyramid
networks <https://arxiv.org/pdf/1901.02446>`_"""
def __init__(
self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
# for panoptic segmentation
semantic_head: OptConfigType = None,
panoptic_fusion_head: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg,
semantic_head=semantic_head,
panoptic_fusion_head=panoptic_fusion_head)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
@MODELS.register_module()
class PanopticFPN(TwoStagePanopticSegmentor):
r"""Implementation of `Panoptic feature pyramid
networks <https://arxiv.org/pdf/1901.02446>`_"""
def __init__(
self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None,
# for panoptic segmentation
semantic_head=None,
panoptic_fusion_head=None):
super(PanopticFPN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg,
semantic_head=semantic_head,
panoptic_fusion_head=panoptic_fusion_head)
|
import logging
import random
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseInformationRetrievalEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Query info: num_rows: 323, num_cols: 30522, row_non_zero_mean: 42.891639709472656, row_sparsity_mean: 0.9985947012901306
Corpus info: num_rows: 3270, num_cols: 30522, row_non_zero_mean: 206.98899841308594, row_sparsity_mean: 0.9932184219360352
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.09%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.76%
Precision@5: 34.06%
Precision@10: 25.98%
Recall@1: 6.09%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.21%
MRR@10: 0.5796
NDCG@10: 0.3613
MAP@100: 0.1827
Primary metric value: 0.3613
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3613
|
import logging
import random
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseInformationRetrievalEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the Touche-2020 IR dataset (https://huggingface.co/datasets/BeIR/webis-touche2020, https://huggingface.co/datasets/BeIR/webis-touche2020-qrels)
corpus = load_dataset("BeIR/webis-touche2020", "corpus", split="corpus")
queries = load_dataset("BeIR/webis-touche2020", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/webis-touche2020-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 30,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=30_000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-touche2020-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
|
from codecs import unicode_escape_decode
from typing import Dict
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from typing import Sequence, Iterable
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayRedis``"""
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from redis
"""
try:
result = self._client.hgetall(self._doc_prefix + _id)
doc = Document.from_base64(result[b'blob'])
return doc
except Exception as ex:
raise KeyError(_id) from ex
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
pipe = self._client.pipeline()
for id in ids:
pipe.hgetall(self._doc_prefix + id)
results = pipe.execute()
for i, result in enumerate(results):
if result:
accumulated_docs.append(Document.from_base64(result[b'blob']))
else:
accumulated_docs_id_not_found.append(ids[i])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
self._del_doc_by_id(_id)
if _id != value.id:
self._del_doc_by_id(value.id)
payload = self._document_to_redis(value)
self._client.hset(self._doc_prefix + value.id, mapping=payload)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._del_doc_by_id(_id)
if _id != doc.id:
self._del_doc_by_id(doc.id)
self._upload_batch(docs)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(self._doc_prefix + _id)
def _document_to_redis(self, doc: 'Document') -> Dict:
extra_columns = {}
for col, _ in self._config.columns.items():
tag = doc.tags.get(col)
if tag is not None:
extra_columns[col] = int(tag) if isinstance(tag, bool) else tag
payload = {
'id': doc.id,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if doc.text:
payload['text'] = doc.text
return payload
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.flushdb()
|
from codecs import unicode_escape_decode
from typing import Dict
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from typing import Sequence, Iterable
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayRedis``"""
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from redis
"""
try:
result = self._client.hgetall(self._doc_prefix + _id)
doc = Document.from_base64(result[b'blob'])
return doc
except Exception as ex:
raise KeyError(_id) from ex
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
pipe = self._client.pipeline()
for id in ids:
pipe.hgetall(self._doc_prefix + id)
results = pipe.execute()
for i, result in enumerate(results):
if result:
accumulated_docs.append(Document.from_base64(result[b'blob']))
else:
accumulated_docs_id_not_found.append(ids[i])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
self._del_doc_by_id(_id)
if _id != value.id:
self._del_doc_by_id(value.id)
payload = self._document_to_redis(value)
self._client.hset(self._doc_prefix + value.id, mapping=payload)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._del_doc_by_id(_id)
if _id != doc.id:
self._del_doc_by_id(doc.id)
self._upload_batch(docs)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(self._doc_prefix + _id)
def _document_to_redis(self, doc: 'Document') -> Dict:
extra_columns = {}
for col, _ in self._config.columns:
tag = doc.tags.get(col)
if tag is not None:
extra_columns[col] = int(tag) if isinstance(tag, bool) else tag
payload = {
'id': doc.id,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if doc.text:
payload['text'] = doc.text
return payload
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.flushdb()
|
# CoSENTLoss must be imported before AnglELoss
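# (Presumably because AnglELoss builds on CoSENTLoss, so the parent loss needs to be
# importable before AnglELoss is loaded below.)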
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
from .MarginMSELoss import MarginMSELoss
from .MatryoshkaLoss import MatryoshkaLoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MSELoss import MSELoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import SiameseDistanceMetric, ContrastiveLoss
from .ContrastiveTensionLoss import (
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
ContrastiveTensionDataLoader,
)
from .CoSENTLoss import CoSENTLoss
from .AnglELoss import AnglELoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
# Triplet losses
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .BatchAllTripletLoss import BatchAllTripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
# coding: utf-8
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import lightgbm as lgb
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
print('Starting training...')
# train
gbm = lgb.LGBMRegressor(num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
callbacks=[lgb.early_stopping(5)])
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
# feature importances
print(f'Feature importances: {list(gbm.feature_importances_)}')
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
return 'RMSLE', np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False
print('Starting training with custom eval function...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=rmsle,
callbacks=[lgb.early_stopping(5)])
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
return 'RAE', np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False
print('Starting training with multiple custom eval functions...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=[rmsle, rae],
callbacks=[lgb.early_stopping(5)])
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmsle_test = rmsle(y_test, y_pred)[1]
rae_test = rae(y_test, y_pred)[1]
print(f'The RMSLE of prediction is: {rmsle_test}')
print(f'The RAE of prediction is: {rae_test}')
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)
print(f'Best parameters found by grid search are: {gbm.best_params_}')
|
# coding: utf-8
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import lightgbm as lgb
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
print('Starting training...')
# train
gbm = lgb.LGBMRegressor(num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
early_stopping_rounds=5)
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
# feature importances
print(f'Feature importances: {list(gbm.feature_importances_)}')
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
return 'RMSLE', np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False
print('Starting training with custom eval function...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=rmsle,
early_stopping_rounds=5)
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
return 'RAE', np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False
print('Starting training with multiple custom eval functions...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=[rmsle, rae],
early_stopping_rounds=5)
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmsle_test = rmsle(y_test, y_pred)[1]
rae_test = rae(y_test, y_pred)[1]
print(f'The RMSLE of prediction is: {rmsle_test}')
print(f'The RAE of prediction is: {rae_test}')
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)
print(f'Best parameters found by grid search are: {gbm.best_params_}')
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
    hflip,  # TODO: Consider moving all pure alias definitions to the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
sanitize_bounding_boxes,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
    hflip,  # TODO: Consider moving all pure alias definitions to the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from backend.blocks.linear._api import LinearAPIException, LinearClient
from backend.blocks.linear._auth import (
LINEAR_OAUTH_IS_CONFIGURED,
TEST_CREDENTIALS_INPUT_OAUTH,
TEST_CREDENTIALS_OAUTH,
LinearCredentials,
LinearCredentialsField,
LinearCredentialsInput,
LinearScope,
)
from backend.blocks.linear.models import Project
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class LinearSearchProjectsBlock(Block):
"""Block for searching projects on Linear"""
class Input(BlockSchema):
credentials: LinearCredentialsInput = LinearCredentialsField(
scopes=[LinearScope.READ],
)
term: str = SchemaField(description="Term to search for projects")
class Output(BlockSchema):
projects: list[Project] = SchemaField(description="List of projects")
        error: str = SchemaField(description="Error message if the project search failed")
def __init__(self):
super().__init__(
id="446a1d35-9d8f-4ac5-83ea-7684ec50e6af",
description="Searches for projects on Linear",
input_schema=self.Input,
output_schema=self.Output,
categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
test_input={
"term": "Test project",
"credentials": TEST_CREDENTIALS_INPUT_OAUTH,
},
disabled=not LINEAR_OAUTH_IS_CONFIGURED,
test_credentials=TEST_CREDENTIALS_OAUTH,
test_output=[
(
"projects",
[
Project(
id="abc123",
name="Test project",
description="Test description",
priority=1,
progress=1,
content="Test content",
)
],
)
],
test_mock={
"search_projects": lambda *args, **kwargs: [
Project(
id="abc123",
name="Test project",
description="Test description",
priority=1,
progress=1,
content="Test content",
)
]
},
)
@staticmethod
def search_projects(
credentials: LinearCredentials,
term: str,
) -> list[Project]:
client = LinearClient(credentials=credentials)
response: list[Project] = client.try_search_projects(term=term)
return response
def run(
self, input_data: Input, *, credentials: LinearCredentials, **kwargs
) -> BlockOutput:
"""Execute the project search"""
try:
projects = self.search_projects(
credentials=credentials,
term=input_data.term,
)
yield "projects", projects
except LinearAPIException as e:
yield "error", str(e)
except Exception as e:
yield "error", f"Unexpected error: {str(e)}"
|
from backend.blocks.linear._api import LinearAPIException, LinearClient
from backend.blocks.linear._auth import (
TEST_CREDENTIALS_INPUT_OAUTH,
TEST_CREDENTIALS_OAUTH,
LinearCredentials,
LinearCredentialsField,
LinearCredentialsInput,
LinearScope,
)
from backend.blocks.linear.models import Project
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class LinearSearchProjectsBlock(Block):
"""Block for searching projects on Linear"""
class Input(BlockSchema):
credentials: LinearCredentialsInput = LinearCredentialsField(
scopes=[LinearScope.READ],
)
term: str = SchemaField(description="Term to search for projects")
class Output(BlockSchema):
projects: list[Project] = SchemaField(description="List of projects")
        error: str = SchemaField(description="Error message if the project search failed")
def __init__(self):
super().__init__(
id="446a1d35-9d8f-4ac5-83ea-7684ec50e6af",
description="Searches for projects on Linear",
input_schema=self.Input,
output_schema=self.Output,
categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
test_input={
"term": "Test project",
"credentials": TEST_CREDENTIALS_INPUT_OAUTH,
},
test_credentials=TEST_CREDENTIALS_OAUTH,
test_output=[
(
"projects",
[
Project(
id="abc123",
name="Test project",
description="Test description",
priority=1,
progress=1,
content="Test content",
)
],
)
],
test_mock={
"search_projects": lambda *args, **kwargs: [
Project(
id="abc123",
name="Test project",
description="Test description",
priority=1,
progress=1,
content="Test content",
)
]
},
)
@staticmethod
def search_projects(
credentials: LinearCredentials,
term: str,
) -> list[Project]:
client = LinearClient(credentials=credentials)
response: list[Project] = client.try_search_projects(term=term)
return response
def run(
self, input_data: Input, *, credentials: LinearCredentials, **kwargs
) -> BlockOutput:
"""Execute the project search"""
try:
projects = self.search_projects(
credentials=credentials,
term=input_data.term,
)
yield "projects", projects
except LinearAPIException as e:
yield "error", str(e)
except Exception as e:
yield "error", f"Unexpected error: {str(e)}"
|