input (string, lengths 33 to 5k) | output (string, lengths 32 to 5k)
---|---|
"""Integration test for Google Search API Wrapper."""
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
def test_call() -> None:
"""Test that call gives the correct answer."""
search = GoogleSearchAPIWrapper()
output = search.run("What was Obama's first name?")
assert "Barack Hussein Obama II" in output
def test_no_result_call() -> None:
"""Test that call gives no result."""
search = GoogleSearchAPIWrapper()
output = search.run(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
print(type(output)) # noqa: T201
assert "No good Google Search Result was found" == output
def test_result_with_params_call() -> None:
"""Test that call gives the correct answer with extra params."""
search = GoogleSearchAPIWrapper()
output = search.results(
query="What was Obama's first name?",
num_results=5,
search_params={"cr": "us", "safe": "active"},
)
assert len(output)
|
"""Integration test for Google Search API Wrapper."""
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
def test_call() -> None:
"""Test that call gives the correct answer."""
search = GoogleSearchAPIWrapper() # type: ignore[call-arg]
output = search.run("What was Obama's first name?")
assert "Barack Hussein Obama II" in output
def test_no_result_call() -> None:
"""Test that call gives no result."""
search = GoogleSearchAPIWrapper() # type: ignore[call-arg]
output = search.run(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
print(type(output)) # noqa: T201
assert "No good Google Search Result was found" == output
def test_result_with_params_call() -> None:
"""Test that call gives the correct answer with extra params."""
search = GoogleSearchAPIWrapper() # type: ignore[call-arg]
output = search.results(
query="What was Obama's first name?",
num_results=5,
search_params={"cr": "us", "safe": "active"},
)
assert len(output)
|
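Both versions of this test module assume valid Google Programmable Search credentials in the environment. Below is a minimal local-setup sketch, not part of the test suite: the placeholder values are hypothetical, GOOGLE_API_KEY and GOOGLE_CSE_ID are the environment variables the wrapper reads, and google-api-python-client is assumed to be installed.
import os
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
# Hypothetical placeholders; real values come from a Google Programmable Search Engine.
os.environ["GOOGLE_API_KEY"] = "<your-api-key>"
os.environ["GOOGLE_CSE_ID"] = "<your-cse-id>"
search = GoogleSearchAPIWrapper(k=5)  # k caps the number of snippets returned by run()
print(search.run("What was Obama's first name?"))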
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import RPNHead
class TestRPNHead(TestCase):
def test_init(self):
"""Test init rpn head."""
rpn_head = RPNHead(num_classes=1, in_channels=1)
self.assertTrue(rpn_head.rpn_conv)
self.assertTrue(rpn_head.rpn_cls)
self.assertTrue(rpn_head.rpn_reg)
# rpn_head.num_convs > 1
rpn_head = RPNHead(num_classes=1, in_channels=1, num_convs=2)
self.assertTrue(rpn_head.rpn_conv)
self.assertTrue(rpn_head.rpn_cls)
self.assertTrue(rpn_head.rpn_reg)
def test_rpn_head_loss(self):
"""Tests rpn head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
rpn_head = RPNHead(num_classes=1, in_channels=1, train_cfg=cfg)
# Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(rpn_head.prior_generator.strides)))
cls_scores, bbox_preds = rpn_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = rpn_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_rpn_cls'])
empty_box_loss = sum(empty_gt_losses['loss_rpn_bbox'])
self.assertGreater(empty_cls_loss.item(), 0,
'rpn cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([0])
one_gt_losses = rpn_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_rpn_cls'])
onegt_box_loss = sum(one_gt_losses['loss_rpn_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'rpn cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'rpn box loss should be non-zero')
# When there is no valid anchor, the loss will be None,
# and this will raise a ValueError.
img_metas = [{
'img_shape': (8, 8, 3),
'pad_shape': (8, 8, 3),
'scale_factor': 1,
}]
with pytest.raises(ValueError):
rpn_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],
img_metas)
def test_bbox_post_process(self):
"""Test the length of detection instance results is 0."""
from mmengine.config import ConfigDict
cfg = ConfigDict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)
rpn_head = RPNHead(num_classes=1, in_channels=1)
results = InstanceData(metainfo=dict())
results.bboxes = torch.zeros((0, 4))
results.scores = torch.zeros(0)
results = rpn_head._bbox_post_process(results, cfg, img_meta=dict())
self.assertEqual(len(results), 0)
self.assertEqual(results.bboxes.size(), (0, 4))
self.assertEqual(results.scores.size(), (0, ))
self.assertEqual(results.labels.size(), (0, ))
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import RPNHead
class TestRPNHead(TestCase):
def test_init(self):
"""Test init rpn head."""
rpn_head = RPNHead(num_classes=1, in_channels=1)
self.assertTrue(rpn_head.rpn_conv)
self.assertTrue(rpn_head.rpn_cls)
self.assertTrue(rpn_head.rpn_reg)
# rpn_head.num_convs > 1
rpn_head = RPNHead(num_classes=1, in_channels=1, num_convs=2)
self.assertTrue(rpn_head.rpn_conv)
self.assertTrue(rpn_head.rpn_cls)
self.assertTrue(rpn_head.rpn_reg)
def test_rpn_head_loss(self):
"""Tests rpn head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
rpn_head = RPNHead(num_classes=1, in_channels=1, train_cfg=cfg)
# Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(rpn_head.prior_generator.strides)))
cls_scores, bbox_preds = rpn_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = rpn_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_rpn_cls'])
empty_box_loss = sum(empty_gt_losses['loss_rpn_bbox'])
self.assertGreater(empty_cls_loss.item(), 0,
'rpn cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([0])
one_gt_losses = rpn_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_rpn_cls'])
onegt_box_loss = sum(one_gt_losses['loss_rpn_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'rpn cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'rpn box loss should be non-zero')
# When there is no valid anchor, the loss will be None,
# and this will raise a ValueError.
img_metas = [{
'img_shape': (8, 8, 3),
'pad_shape': (8, 8, 3),
'scale_factor': 1,
}]
with pytest.raises(ValueError):
rpn_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],
img_metas)
def test_bbox_post_process(self):
"""Test the length of detection instance results is 0."""
from mmengine.config import ConfigDict
cfg = ConfigDict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)
rpn_head = RPNHead(num_classes=1, in_channels=1)
results = InstanceData(metainfo=dict())
results.bboxes = torch.zeros((0, 4))
results.scores = torch.zeros(0)
results = rpn_head._bbox_post_process(results, cfg, img_meta=dict())
self.assertEqual(len(results), 0)
self.assertEqual(results.bboxes.size(), (0, 4))
self.assertEqual(results.scores.size(), (0, ))
self.assertEqual(results.labels.size(), (0, ))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
import csv
import logging
import os
from typing import List
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
it computes the average precision and the best possible F1 score
"""
def __init__(
self,
sentence_pairs: List[List[str]],
labels: List[int],
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert label == 0 or label == 1
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = [
"epoch",
"steps",
"Accuracy",
"Accuracy_Threshold",
"F1",
"F1_Threshold",
"Precision",
"Recall",
"Average_Precision",
]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(
self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar
)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(
pred_scores, self.labels, True
)
ap = average_precision_score(self.labels, pred_scores)
logger.info("Accuracy: {:.2f}\t(Threshold: {:.4f})".format(acc * 100, acc_threshold))
logger.info("F1: {:.2f}\t(Threshold: {:.4f})".format(f1 * 100, f1_threshold))
logger.info("Precision: {:.2f}".format(precision * 100))
logger.info("Recall: {:.2f}".format(recall * 100))
logger.info("Average Precision: {:.2f}\n".format(ap * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
|
import logging
from sklearn.metrics import average_precision_score
from typing import List
import numpy as np
import os
import csv
from ... import InputExample
from ...evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
it computes the average precision and the best possible F1 score
"""
def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str='', show_progress_bar: bool = False, write_csv: bool = True):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert (label == 0 or label == 1)
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else '') + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy", "Accuracy_Threshold", "F1", "F1_Threshold", "Precision", "Recall", "Average_Precision"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(pred_scores, self.labels, True)
ap = average_precision_score(self.labels, pred_scores)
logger.info("Accuracy: {:.2f}\t(Threshold: {:.4f})".format(acc * 100, acc_threshold))
logger.info("F1: {:.2f}\t(Threshold: {:.4f})".format(f1 * 100, f1_threshold))
logger.info("Precision: {:.2f}".format(precision * 100))
logger.info("Recall: {:.2f}".format(recall * 100))
logger.info("Average Precision: {:.2f}\n".format(ap * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
|
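A minimal usage sketch for the evaluator defined above, assuming sentence-transformers is installed and the cross-encoder/stsb-distilroberta-base checkpoint can be downloaded; the toy sentence pairs are illustrative only.
from sentence_transformers import CrossEncoder, InputExample
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
# Two toy pairs with binary labels (1 = similar/relevant, 0 = not).
examples = [
    InputExample(texts=["A man is eating food.", "A man is eating a meal."], label=1),
    InputExample(texts=["A man is eating food.", "A plane is taking off."], label=0),
]
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
evaluator = CEBinaryClassificationEvaluator.from_input_examples(examples, name="dev")
average_precision = evaluator(model)  # returns the average precision as a float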
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from typing import Any, Optional, Union
from huggingface_hub import HfFileSystem
from . import config
from .table import CastError
from .utils.track import TrackedIterableFromGenerator, tracked_list, tracked_str
class DatasetsError(Exception):
"""Base class for exceptions in this library."""
class DefunctDatasetError(DatasetsError):
"""The dataset has been defunct."""
class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
"""FileNotFoundError raised by this library."""
class DataFilesNotFoundError(FileNotFoundDatasetsError):
"""No (supported) data files found."""
class DatasetNotFoundError(FileNotFoundDatasetsError):
"""Dataset not found.
Raised when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
"""
class DatasetBuildError(DatasetsError):
pass
class ManualDownloadError(DatasetBuildError):
pass
class FileFormatError(DatasetBuildError):
pass
class DatasetGenerationError(DatasetBuildError):
pass
class DatasetGenerationCastError(DatasetGenerationError):
@classmethod
def from_cast_error(
cls,
cast_error: CastError,
builder_name: str,
gen_kwargs: dict[str, Any],
token: Optional[Union[bool, str]],
) -> "DatasetGenerationCastError":
explanation_message = (
f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
)
formatted_tracked_gen_kwargs: list[str] = []
for gen_kwarg in gen_kwargs.values():
if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterableFromGenerator)):
continue
while (
isinstance(gen_kwarg, (tracked_list, TrackedIterableFromGenerator)) and gen_kwarg.last_item is not None
):
gen_kwarg = gen_kwarg.last_item
if isinstance(gen_kwarg, tracked_str):
gen_kwarg = gen_kwarg.get_origin()
if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
gen_kwarg = "hf://" + resolved_path.unresolve()
if "@" + resolved_path.revision in gen_kwarg:
gen_kwarg = (
gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ f" (at revision {resolved_path.revision})"
)
formatted_tracked_gen_kwargs.append(str(gen_kwarg))
if formatted_tracked_gen_kwargs:
explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
return cls("An error occurred while generating the dataset" + explanation_message + help_message)
class ChecksumVerificationError(DatasetsError):
"""Error raised during checksums verifications of downloaded files."""
class UnexpectedDownloadedFileError(ChecksumVerificationError):
"""Some downloaded files were not expected."""
class ExpectedMoreDownloadedFilesError(ChecksumVerificationError):
"""Some files were supposed to be downloaded but were not."""
class NonMatchingChecksumError(ChecksumVerificationError):
"""The downloaded file checksum don't match the expected checksum."""
class SplitsVerificationError(DatasetsError):
"""Error raised during splits verifications."""
class UnexpectedSplitsError(SplitsVerificationError):
"""The expected splits of the downloaded file is missing."""
class ExpectedMoreSplitsError(SplitsVerificationError):
"""Some recorded splits are missing."""
class NonMatchingSplitsSizesError(SplitsVerificationError):
"""The splits sizes don't match the expected splits sizes."""
|
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from typing import Any, Dict, List, Optional, Union
from huggingface_hub import HfFileSystem
from . import config
from .table import CastError
from .utils.track import TrackedIterableFromGenerator, tracked_list, tracked_str
class DatasetsError(Exception):
"""Base class for exceptions in this library."""
class DefunctDatasetError(DatasetsError):
"""The dataset has been defunct."""
class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
"""FileNotFoundError raised by this library."""
class DataFilesNotFoundError(FileNotFoundDatasetsError):
"""No (supported) data files found."""
class DatasetNotFoundError(FileNotFoundDatasetsError):
"""Dataset not found.
Raised when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
"""
class DatasetBuildError(DatasetsError):
pass
class ManualDownloadError(DatasetBuildError):
pass
class FileFormatError(DatasetBuildError):
pass
class DatasetGenerationError(DatasetBuildError):
pass
class DatasetGenerationCastError(DatasetGenerationError):
@classmethod
def from_cast_error(
cls,
cast_error: CastError,
builder_name: str,
gen_kwargs: Dict[str, Any],
token: Optional[Union[bool, str]],
) -> "DatasetGenerationCastError":
explanation_message = (
f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
)
formatted_tracked_gen_kwargs: List[str] = []
for gen_kwarg in gen_kwargs.values():
if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterableFromGenerator)):
continue
while (
isinstance(gen_kwarg, (tracked_list, TrackedIterableFromGenerator)) and gen_kwarg.last_item is not None
):
gen_kwarg = gen_kwarg.last_item
if isinstance(gen_kwarg, tracked_str):
gen_kwarg = gen_kwarg.get_origin()
if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
gen_kwarg = "hf://" + resolved_path.unresolve()
if "@" + resolved_path.revision in gen_kwarg:
gen_kwarg = (
gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ f" (at revision {resolved_path.revision})"
)
formatted_tracked_gen_kwargs.append(str(gen_kwarg))
if formatted_tracked_gen_kwargs:
explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
return cls("An error occurred while generating the dataset" + explanation_message + help_message)
class ChecksumVerificationError(DatasetsError):
"""Error raised during checksums verifications of downloaded files."""
class UnexpectedDownloadedFileError(ChecksumVerificationError):
"""Some downloaded files were not expected."""
class ExpectedMoreDownloadedFilesError(ChecksumVerificationError):
"""Some files were supposed to be downloaded but were not."""
class NonMatchingChecksumError(ChecksumVerificationError):
"""The downloaded file checksum don't match the expected checksum."""
class SplitsVerificationError(DatasetsError):
"""Error raised during splits verifications."""
class UnexpectedSplitsError(SplitsVerificationError):
"""The expected splits of the downloaded file is missing."""
class ExpectedMoreSplitsError(SplitsVerificationError):
"""Some recorded splits are missing."""
class NonMatchingSplitsSizesError(SplitsVerificationError):
"""The splits sizes don't match the expected splits sizes."""
|
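A hedged sketch of how these exception classes are typically caught by callers of the datasets library; the repository id below is a hypothetical placeholder.
from datasets import load_dataset
from datasets.exceptions import DataFilesNotFoundError, DatasetGenerationCastError
try:
    ds = load_dataset("some-user/some-dataset")  # hypothetical repo id
except DataFilesNotFoundError:
    print("No supported data files were found in the repository.")
except DatasetGenerationCastError as err:
    # The message lists the mismatched columns and the file being read when the cast failed.
    print(err)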
import numpy as np
import pandas as pd
import pytest
import xgboost as xgb
from xgboost.testing.interaction_constraints import (
run_interaction_constraints,
training_accuracy,
)
class TestGPUInteractionConstraints:
@pytest.mark.parametrize("tree_method", ["hist", "approx"])
def test_interaction_constraints(self, tree_method: str) -> None:
run_interaction_constraints(tree_method=tree_method, device="cuda")
@pytest.mark.parametrize("tree_method", ["hist", "approx"])
def test_training_accuracy(self, tree_method: str) -> None:
dpath = "demo/data/"
training_accuracy(tree_method=tree_method, dpath=dpath, device="cuda")
# Case where a different number of features can occur in the evaluator
def test_issue_8730(self):
X = pd.DataFrame(
zip(range(0, 100), range(200, 300), range(300, 400), range(400, 500)),
columns=["A", "B", "C", "D"],
)
y = np.array([*([0] * 50), *([1] * 50)])
dm = xgb.DMatrix(X, label=y)
params = {
"eta": 0.16095019509249486,
"min_child_weight": 1,
"subsample": 0.688567929338029,
"colsample_bynode": 0.7,
"gamma": 5.666579817418348e-06,
"lambda": 0.14943712232059794,
"grow_policy": "depthwise",
"max_depth": 3,
"tree_method": "hist",
"device": "cuda",
"interaction_constraints": [["A", "B"], ["B", "D", "C"], ["C", "D"]],
"objective": "count:poisson",
"eval_metric": "poisson-nloglik",
"verbosity": 0,
}
xgb.train(params, dm, num_boost_round=100)
|
import sys
import numpy as np
import pandas as pd
import xgboost as xgb
sys.path.append("tests/python")
# Don't import the test class, otherwise the tests will run twice.
import test_interaction_constraints as test_ic # noqa
rng = np.random.RandomState(1994)
class TestGPUInteractionConstraints:
cputest = test_ic.TestInteractionConstraints()
def test_interaction_constraints(self):
self.cputest.run_interaction_constraints(tree_method="gpu_hist")
def test_training_accuracy(self):
self.cputest.training_accuracy(tree_method="gpu_hist")
# Case where a different number of features can occur in the evaluator
def test_issue_8730(self):
X = pd.DataFrame(
zip(range(0, 100), range(200, 300), range(300, 400), range(400, 500)),
columns=["A", "B", "C", "D"],
)
y = np.array([*([0] * 50), *([1] * 50)])
dm = xgb.DMatrix(X, label=y)
params = {
"eta": 0.16095019509249486,
"min_child_weight": 1,
"subsample": 0.688567929338029,
"colsample_bynode": 0.7,
"gamma": 5.666579817418348e-06,
"lambda": 0.14943712232059794,
"grow_policy": "depthwise",
"max_depth": 3,
"tree_method": "gpu_hist",
"interaction_constraints": [["A", "B"], ["B", "D", "C"], ["C", "D"]],
"objective": "count:poisson",
"eval_metric": "poisson-nloglik",
"verbosity": 0,
}
xgb.train(params, dm, num_boost_round=100)
|
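For reference, a small CPU-only sketch of the interaction-constraints feature exercised by these tests; the synthetic data and constraint groups are illustrative only.
import numpy as np
import pandas as pd
import xgboost as xgb
rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(100, 3)), columns=["A", "B", "C"])
y = (X["A"] + X["C"] > 0).astype(int)
dm = xgb.DMatrix(X, label=y)
params = {
    "tree_method": "hist",
    "objective": "binary:logistic",
    # "A" may interact with "B", and "B" with "C", but "A" and "C" never share a tree path.
    "interaction_constraints": [["A", "B"], ["B", "C"]],
}
booster = xgb.train(params, dm, num_boost_round=10)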
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'SparseStaticEmbedding\.*': 1e-3}` for the
SparseStaticEmbedding module. This is useful when you want to fine-tune specific parts of the model
with different learning rates.
"""
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
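A minimal construction sketch for the arguments class above, assuming a sentence-transformers version that ships the CrossEncoder trainer; the import path, output directory, and hyperparameters here are assumptions for illustration.
from sentence_transformers.cross_encoder import CrossEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
args = CrossEncoderTrainingArguments(
    output_dir="outputs/reranker",               # hypothetical output directory
    num_train_epochs=1,
    per_device_train_batch_size=32,
    batch_sampler=BatchSamplers.NO_DUPLICATES,   # see BatchSamplers for the valid options
)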
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class HTCMaskHead(FCNMaskHead):
def __init__(self, with_conv_res=True, *args, **kwargs):
super(HTCMaskHead, self).__init__(*args, **kwargs)
self.with_conv_res = with_conv_res
if self.with_conv_res:
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
if res_feat is not None:
assert self.with_conv_res
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
outs.append(mask_pred)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
def __init__(self, with_conv_res=True, *args, **kwargs):
super(HTCMaskHead, self).__init__(*args, **kwargs)
self.with_conv_res = with_conv_res
if self.with_conv_res:
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
if res_feat is not None:
assert self.with_conv_res
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
outs.append(mask_pred)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
|
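A hedged shape-check sketch for the head above, assuming mmdet and mmcv are installed and relying on FCNMaskHead's default upsample and loss settings; the tensor sizes are illustrative only.
import torch
head = HTCMaskHead(in_channels=256, conv_out_channels=256, num_classes=80)
x = torch.rand(2, 256, 14, 14)   # RoI-aligned mask features for two proposals
mask_pred, res_feat = head(x)    # mask logits plus the feature handed to the next HTC stage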
_base_ = './cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' # noqa
# please install mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmpretrain.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optim_wrapper = dict(paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
|
_base_ = './cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' # noqa
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optim_wrapper = dict(paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
|
import json
from typing import Optional, Type
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool
class TransferSchema(BaseModel):
"""Schema for transfer operations."""
address: str = Field(..., description="Address to transfer AIN to")
amount: int = Field(..., description="Amount of AIN to transfer")
class AINTransfer(AINBaseTool):
"""Tool for transfer operations."""
name: str = "AINtransfer"
description: str = "Transfers AIN to a specified address"
args_schema: Type[TransferSchema] = TransferSchema
async def _arun(
self,
address: str,
amount: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
try:
res = await self.interface.wallet.transfer(address, amount, nonce=-1)
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{type(e).__name__}: {str(e)}"
|
import json
from typing import Optional, Type
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool
class TransferSchema(BaseModel):
"""Schema for transfer operations."""
address: str = Field(..., description="Address to transfer AIN to")
amount: int = Field(..., description="Amount of AIN to transfer")
class AINTransfer(AINBaseTool): # type: ignore[override, override]
"""Tool for transfer operations."""
name: str = "AINtransfer"
description: str = "Transfers AIN to a specified address"
args_schema: Type[TransferSchema] = TransferSchema
async def _arun(
self,
address: str,
amount: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
try:
res = await self.interface.wallet.transfer(address, amount, nonce=-1)
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{type(e).__name__}: {str(e)}"
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 480), (1333, 960)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 960)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .matrix_nms import mask_matrix_nms
from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_scores)
__all__ = [
'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_scores)
__all__ = [
'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
'merge_aug_scores', 'merge_aug_masks', 'fast_nms'
]
|
import sys
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher"
)
@pytest.mark.asyncio
async def test_get_graph_url(monkeypatch):
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Mock login to graphistry
def mock_graphistry_return(username, password):
return True
import graphistry
monkeypatch.setattr(graphistry, "login", mock_graphistry_return)
# Mock render of graph
async def mock_render_return(graph):
return "link"
from cognee.shared import utils
monkeypatch.setattr(utils, "render_graph", mock_render_return)
await cogneeRAG.get_graph_url("password", "username")
from cognee.base_config import get_base_config
assert get_base_config().graphistry_password == "password", (
"Password was not set properly"
)
assert get_base_config().graphistry_username == "username", (
"Username was not set properly"
)
|
import sys
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher"
)
@pytest.mark.asyncio()
async def test_get_graph_url(monkeypatch):
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Mock login to graphistry
def mock_graphistry_return(username, password):
return True
import graphistry
monkeypatch.setattr(graphistry, "login", mock_graphistry_return)
# Mock render of graph
async def mock_render_return(graph):
return "link"
from cognee.shared import utils
monkeypatch.setattr(utils, "render_graph", mock_render_return)
await cogneeRAG.get_graph_url("password", "username")
from cognee.base_config import get_base_config
assert get_base_config().graphistry_password == "password", (
"Password was not set properly"
)
assert get_base_config().graphistry_username == "username", (
"Username was not set properly"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of FOVEA. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of FOVEA. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of FOVEA. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of FOVEA. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
If None, L2 normalization is not applied to the first input feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
scale (float, optional): Defaults to 20..
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
# The normalization layer is converted to FP32 during FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
If None, L2 normalization is not applied to the first input feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
scale (float, optional): Defaults to 20..
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
# The normalization layer is converted to FP32 during FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
|
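A hedged shape-check sketch for the neck above; the channel, stride, and padding values mirror the standard SSD300 configuration, and the input feature-map sizes are assumptions for illustration.
import torch
neck = SSDNeck(
    in_channels=(512, 1024),
    out_channels=(512, 1024, 512, 256, 256, 256),
    level_strides=(2, 2, 1, 1),
    level_paddings=(1, 1, 0, 0),
    l2_norm_scale=20.)
feats = (torch.rand(1, 512, 38, 38), torch.rand(1, 1024, 19, 19))
outs = neck(feats)
# Two input maps (the first L2-normalised) plus four extra levels -> six outputs.
assert len(outs) == 6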
import time
from typing import Callable
from pydantic import Field
from docarray import BaseDoc
from docarray.typing import NdArray
N_DIM = 10
class SimpleSchema(BaseDoc):
text: str = Field(index_name='text_index')
number: int
embedding: NdArray[10] = Field(dim=10, index_name="vector_index")
class SimpleDoc(BaseDoc):
embedding: NdArray[N_DIM] = Field(dim=N_DIM, index_name="vector_index_1")
class NestedDoc(BaseDoc):
d: SimpleDoc
embedding: NdArray[N_DIM] = Field(dim=N_DIM, index_name="vector_index")
class FlatSchema(BaseDoc):
embedding1: NdArray = Field(dim=N_DIM, index_name="vector_index_1")
embedding2: NdArray = Field(dim=N_DIM, index_name="vector_index_2")
def assert_when_ready(callable: Callable, tries: int = 5, interval: float = 2):
"""
Retry callable to account for time taken to change data on the cluster
"""
while True:
try:
callable()
except AssertionError as e:
tries -= 1
if tries == 0:
raise RuntimeError("Retries exhausted.") from e
time.sleep(interval)
else:
return
|
import time
from typing import Callable
from pydantic import Field
from docarray import BaseDoc
from docarray.typing import NdArray
N_DIM = 10
class SimpleSchema(BaseDoc):
text: str = Field(index_name='text_index')
number: int
embedding: NdArray[10] = Field(dim=10, index_name="vector_index")
class SimpleDoc(BaseDoc):
embedding: NdArray[N_DIM] = Field(dim=N_DIM, index_name="vector_index_1")
class NestedDoc(BaseDoc):
d: SimpleDoc
embedding: NdArray[N_DIM] = Field(dim=N_DIM, index_name="vector_index")
class FlatSchema(BaseDoc):
embedding1: NdArray = Field(dim=N_DIM, index_name="vector_index_1")
# dim and N_DIM are intentionally set to different values to check the correct handling of n_dim
embedding2: NdArray[50] = Field(dim=N_DIM, index_name="vector_index_2")
def assert_when_ready(callable: Callable, tries: int = 5, interval: float = 2):
"""
Retry callable to account for time taken to change data on the cluster
"""
while True:
try:
callable()
except AssertionError:
tries -= 1
if tries == 0:
raise
time.sleep(interval)
else:
return
|
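A minimal usage sketch for the assert_when_ready helper above; the counter simulates a backend that only reaches the expected state after a couple of polls.
import itertools
counter = itertools.count(start=98)   # simulated index size, eventually reaches 100
def check_index_populated():
    assert next(counter) >= 100
assert_when_ready(check_index_populated, tries=5, interval=0.1)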
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import AstraDBChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AstraDBChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AstraDBChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import AstraDBChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AstraDBChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AstraDBChatMessageHistory",
]
|
from llama_index.llms.openai.base import AsyncOpenAI, OpenAI, SyncOpenAI, Tokenizer
from llama_index.llms.openai.responses import OpenAIResponses
__all__ = ["OpenAI", "OpenAIResponses", "Tokenizer", "SyncOpenAI", "AsyncOpenAI"]
|
from llama_index.llms.openai.base import AsyncOpenAI, OpenAI, SyncOpenAI, Tokenizer
__all__ = ["OpenAI", "Tokenizer", "SyncOpenAI", "AsyncOpenAI"]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for LeViT."""
from ...image_processing_utils_fast import BaseImageProcessorFast, SizeDict
from ...image_transforms import (
ChannelDimension,
get_resize_output_image_size,
)
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
from ...utils import auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
@auto_docstring
class LevitImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = None
def resize(
self,
image: torch.Tensor,
size: SizeDict,
interpolation: "F.InterpolationMode" = None,
**kwargs,
) -> torch.Tensor:
"""
Resize an image.
If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
size["width"])`.
If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled
to `(size["shortest_egde"] * height / width, size["shortest_egde"])`.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
`c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
i.e, if height > width, then image will be rescaled to (size * height / width, size).
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BICUBIC`):
                Resampling filter to use when resizing the image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BICUBIC
if size.shortest_edge:
shortest_edge = int((256 / 224) * size["shortest_edge"])
new_size = get_resize_output_image_size(
image, size=shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST
)
elif size.height and size.width:
new_size = (size.height, size.width)
else:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size.keys()} {size.keys()}."
)
return F.resize(
image,
size=new_size,
interpolation=interpolation,
**kwargs,
)
__all__ = ["LevitImageProcessorFast"]
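# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Minimal run of the fast processor on a dummy image. It relies only on the
# standard `BaseImageProcessorFast.__call__` contract (resize -> center crop ->
# rescale -> normalize); the dummy image shape and the "pt" return type are
# assumptions for the demo.
def _levit_fast_processor_demo():
    processor = LevitImageProcessorFast()
    image = torch.randint(0, 256, (3, 300, 400), dtype=torch.uint8)
    batch = processor(images=image, return_tensors="pt")
    # shortest edge is scaled by 256/224, then the image is center-cropped to 224x224
    assert batch["pixel_values"].shape == (1, 3, 224, 224)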
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for LeViT."""
from ...image_processing_utils_fast import BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BaseImageProcessorFast, SizeDict
from ...image_transforms import (
ChannelDimension,
get_resize_output_image_size,
)
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
from ...utils import add_start_docstrings, is_torch_available, is_torchvision_available, is_torchvision_v2_available
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
@add_start_docstrings(
"Constructs a fast Levit image processor.",
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
)
class LevitImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = None
def resize(
self,
image: torch.Tensor,
size: SizeDict,
interpolation: "F.InterpolationMode" = None,
**kwargs,
) -> torch.Tensor:
"""
Resize an image.
If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
size["width"])`.
If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled
to `(size["shortest_egde"] * height / width, size["shortest_egde"])`.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
`c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
i.e, if height > width, then image will be rescaled to (size * height / width, size).
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BICUBIC`):
                Resampling filter to use when resizing the image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BICUBIC
if size.shortest_edge:
shortest_edge = int((256 / 224) * size["shortest_edge"])
new_size = get_resize_output_image_size(
image, size=shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST
)
elif size.height and size.width:
new_size = (size.height, size.width)
else:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size.keys()} {size.keys()}."
)
return F.resize(
image,
size=new_size,
interpolation=interpolation,
**kwargs,
)
__all__ = ["LevitImageProcessorFast"]
|
_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import PointCloud3D
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensor = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensor, np.ndarray)
def test_point_cloud_np():
pc = parse_obj_as(PointCloud3D, np.zeros((10, 10, 3)))
assert (pc.tensor == np.zeros((10, 10, 3))).all()
def test_point_cloud_torch():
pc = parse_obj_as(PointCloud3D, torch.zeros(10, 10, 3))
assert (pc.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_tensorflow():
pc = parse_obj_as(PointCloud3D, tf.zeros((10, 10, 3)))
assert tnp.allclose(pc.tensor.tensor, tf.zeros((10, 10, 3)))
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDocument):
pc: PointCloud3D
pc2: PointCloud3D
pc3: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=np.zeros((10, 10, 3)),
pc3=torch.zeros(10, 10, 3),
)
assert doc.pc.url == 'http://myurl.ply'
assert (doc.pc2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.pc3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_shortcut_doc_tf():
class MyDoc(BaseDocument):
pc: PointCloud3D
pc2: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=tf.zeros((10, 10, 3)),
)
assert doc.pc.url == 'http://myurl.ply'
assert tnp.allclose(doc.pc2.tensor.tensor, tf.zeros((10, 10, 3)))
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import PointCloud3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensor = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensor, np.ndarray)
def test_point_cloud_np():
image = parse_obj_as(PointCloud3D, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_point_cloud_torch():
image = parse_obj_as(PointCloud3D, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDocument):
image: PointCloud3D
image2: PointCloud3D
image3: PointCloud3D
doc = MyDoc(
image='http://myurl.ply',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.ply'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .sparse_rcnn import SparseRCNN
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(QueryInst, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
from ..builder import DETECTORS
from .sparse_rcnn import SparseRCNN
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(QueryInst, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
import json
from typing import Dict, List, Union
from docarray.array.abstract_array import AnyDocumentArray
from docarray.array.array.array import DocumentArray
def filter_docs(
docs: AnyDocumentArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocumentArray:
"""
Filter the Documents in the index according to the given filter query.
EXAMPLE USAGE
.. code-block:: python
from docarray import DocumentArray, BaseDocument
from docarray.documents import Text, Image
        from docarray.utils.filter import filter_docs
class MyDocument(BaseDocument):
caption: Text
image: Image
price: int
docs = DocumentArray[MyDocument](
[
MyDocument(
caption='A tiger in the jungle',
image=Image(url='tigerphoto.png'),
price=100,
),
MyDocument(
caption='A swimming turtle', image=Image(url='turtlepic.png'), price=50
),
MyDocument(
caption='A couple birdwatching with binoculars',
image=Image(url='binocularsphoto.png'),
price=30,
),
]
)
query = {
'$and': {
'image__url': {'$regex': 'photo'},
'price': {'$lte': 50},
}
}
results = filter_docs(docs, query)
assert len(results) == 1
assert results[0].price == 30
assert results[0].caption == 'A couple birdwatching with binoculars'
assert results[0].image.url == 'binocularsphoto.png'
:param docs: the DocumentArray where to apply the filter
:param query: the query to filter by
:return: A DocumentArray containing the Documents
in `docs` that fulfill the filter conditions in the `query`
"""
from docarray.utils.query_language.query_parser import QueryParser
if query:
query = query if not isinstance(query, str) else json.loads(query)
parser = QueryParser(query)
return DocumentArray.__class_getitem__(docs.document_type)(
d for d in docs if parser.evaluate(d)
)
else:
return docs
|
import json
from typing import Dict, List, Union
from docarray.array.abstract_array import AnyDocumentArray
from docarray.array.array.array import DocumentArray
def filter(
docs: AnyDocumentArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocumentArray:
"""
Filter the Documents in the index according to the given filter query.
EXAMPLE USAGE
.. code-block:: python
from docarray import DocumentArray, BaseDocument
from docarray.documents import Text, Image
        from docarray.utils.filter import filter
class MyDocument(BaseDocument):
caption: Text
image: Image
price: int
docs = DocumentArray[MyDocument](
[
MyDocument(
caption='A tiger in the jungle',
image=Image(url='tigerphoto.png'),
price=100,
),
MyDocument(
caption='A swimming turtle', image=Image(url='turtlepic.png'), price=50
),
MyDocument(
caption='A couple birdwatching with binoculars',
image=Image(url='binocularsphoto.png'),
price=30,
),
]
)
query = {
'$and': {
'image__url': {'$regex': 'photo'},
'price': {'$lte': 50},
}
}
results = filter(docs, query)
assert len(results) == 1
assert results[0].price == 30
assert results[0].caption == 'A couple birdwatching with binoculars'
assert results[0].image.url == 'binocularsphoto.png'
:param docs: the DocumentArray where to apply the filter
:param query: the query to filter by
:return: A DocumentArray containing the Documents
in `docs` that fulfill the filter conditions in the `query`
"""
from docarray.utils.query_language.query_parser import QueryParser
if query:
query = query if not isinstance(query, str) else json.loads(query)
parser = QueryParser(query)
return DocumentArray.__class_getitem__(docs.document_type)(
d for d in docs if parser.evaluate(d)
)
else:
return docs
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './reppoints-moment_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
|
_base_ = './reppoints_moment_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
|
from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocArrayProto,
DocArrayStackedProto,
DocumentProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocArrayProto,
DocArrayStackedProto,
DocumentProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocArrayProto',
'DocumentProto',
'NdArrayProto',
'NodeProto',
'DocArrayStackedProto',
'ListOfDocArrayProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
|
from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
ListOfAnyProto,
ListOfDocumentArrayProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
ListOfAnyProto,
ListOfDocumentArrayProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocumentArrayProto',
'DocumentProto',
'NdArrayProto',
'NodeProto',
'DocumentArrayStackedProto',
'ListOfDocumentArrayProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
self.interval = interval
def after_train_iter(self, runner):
if self.every_n_iters(runner, self.interval):
            assert torch.isfinite(runner.outputs['loss']), \
                'loss becomes infinite or NaN!'
|
import torch
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
self.interval = interval
def after_train_iter(self, runner):
if self.every_n_iters(runner, self.interval):
            assert torch.isfinite(runner.outputs['loss']), \
                'loss becomes infinite or NaN!'
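# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The hook is enabled through the config system rather than instantiated directly.
# A typical config fragment (the 50-iteration interval is just the default) would be:
#
#     custom_hooks = [dict(type='CheckInvalidLossHook', interval=50)]
#
# The runner then calls `after_train_iter` and the finiteness check fires every
# `interval` training iterations.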
|
"""
This example loads the pre-trained SentenceTransformer model 'nli-distilroberta-base-v2' from Hugging Face.
It then fine-tunes this model for some epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Sentence Transformer model here, for example all-mpnet-base-v2, all-MiniLM-L6-v2, mixedbread-ai/mxbai-embed-large-v1
model_name = sys.argv[1] if len(sys.argv) > 1 else "sentence-transformers/all-mpnet-base-v2"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
"""
This example loads the pre-trained SentenceTransformer model 'nli-distilroberta-base-v2' from Hugging Face.
It then fine-tunes this model for some epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
import traceback
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Sentence Transformer model here, for example all-mpnet-base-v2, all-MiniLM-L6-v2, mixedbread-ai/mxbai-embed-large-v1
model_name = sys.argv[1] if len(sys.argv) > 1 else "sentence-transformers/all-mpnet-base-v2"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model_reused: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model_reused
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
def test_nanobeir_evaluator():
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
@keras_export("keras.callbacks.LambdaCallback")
class LambdaCallback(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time (during `Model.{fit | evaluate | predict}`).
    Note that the callbacks expect positional arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
- `on_train_batch_begin` and `on_train_batch_end` expect a positional
argument `batch` and a keyword argument `logs`
- See `Callback` class definition for the full list of functions and their
expected arguments.
Args:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
on_train_batch_begin: called at the beginning of every train batch.
on_train_batch_end: called at the end of every train batch.
kwargs: Any function in `Callback` that you want to override by
passing `function_name=function`. For example,
`LambdaCallback(.., on_train_end=train_end_fn)`. The custom function
needs to have same arguments as the ones defined in `Callback`.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_train_batch_begin=lambda batch,logs: print(batch))
# Stream the epoch loss to a file in JSON format. The file content
# is not well-formed JSON but rather has a JSON object per line.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
"""
def __init__(
self,
on_epoch_begin=None,
on_epoch_end=None,
on_train_begin=None,
on_train_end=None,
on_train_batch_begin=None,
on_train_batch_end=None,
**kwargs,
):
super().__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
if on_train_begin is not None:
self.on_train_begin = on_train_begin
if on_train_end is not None:
self.on_train_end = on_train_end
if on_train_batch_begin is not None:
self.on_train_batch_begin = on_train_batch_begin
if on_train_batch_end is not None:
self.on_train_batch_end = on_train_batch_end
|
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
@keras_export("keras.callbacks.LambdaCallback")
class LambdaCallback(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time (during `Model.{fit | evaluate | predict}`).
    Note that the callbacks expect positional arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
- `on_train_batch_begin` and `on_train_batch_end` expect two positional
arguments: `batch`, `logs`
- See `Callback` class definition for the full list of functions and their
expected arguments.
Args:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
on_train_batch_begin: called at the beginning of every train batch.
on_train_batch_end: called at the end of every train batch.
kwargs: Any function in `Callback` that you want to override by
passing `function_name=function`. For example,
`LambdaCallback(.., on_train_end=train_end_fn)`. The custom function
needs to have same arguments as the ones defined in `Callback`.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_train_batch_begin=lambda batch,logs: print(batch))
# Stream the epoch loss to a file in JSON format. The file content
# is not well-formed JSON but rather has a JSON object per line.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
"""
def __init__(
self,
on_epoch_begin=None,
on_epoch_end=None,
on_train_begin=None,
on_train_end=None,
on_train_batch_begin=None,
on_train_batch_end=None,
**kwargs,
):
super().__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
if on_train_begin is not None:
self.on_train_begin = on_train_begin
if on_train_end is not None:
self.on_train_end = on_train_end
if on_train_batch_begin is not None:
self.on_train_batch_begin = on_train_batch_begin
if on_train_batch_end is not None:
self.on_train_batch_end = on_train_batch_end
|
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.array.stacked.array_stacked import DocumentArrayStacked
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDocument):
text: str
tensor: NdArray
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDocument):
text: TextDoc
image: ImageDoc
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDocument):
text: TextDoc
image: ImageDoc
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDocument):
image: NdArray
da = DocumentArray[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocumentArrayStacked.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocumentArrayStacked)
|
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.array.stacked.array_stacked import DocumentArrayStacked
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDocument):
text: str
tensor: NdArray
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDocument):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDocument):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDocument):
image: NdArray
da = DocumentArray[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocumentArrayStacked.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocumentArrayStacked)
|
import json
import os
import pickle
import numpy as np
import xgboost as xgb
kRows = 100
kCols = 10
def generate_data():
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
return X, y
class TestPickling:
def run_model_pickling(self, xgb_params) -> str:
X, y = generate_data()
dtrain = xgb.DMatrix(X, y)
bst = xgb.train(xgb_params, dtrain)
dump_0 = bst.get_dump(dump_format='json')
assert dump_0
config_0 = bst.save_config()
filename = 'model.pkl'
with open(filename, 'wb') as fd:
pickle.dump(bst, fd)
with open(filename, 'rb') as fd:
bst = pickle.load(fd)
with open(filename, 'wb') as fd:
pickle.dump(bst, fd)
with open(filename, 'rb') as fd:
bst = pickle.load(fd)
assert bst.get_dump(dump_format='json') == dump_0
if os.path.exists(filename):
os.remove(filename)
config_1 = bst.save_config()
assert config_0 == config_1
return json.loads(config_0)
def test_model_pickling_json(self):
def check(config):
tree_param = config["learner"]["gradient_booster"]["tree_train_param"]
subsample = tree_param["subsample"]
assert float(subsample) == 0.5
params = {"nthread": 8, "tree_method": "hist", "subsample": 0.5}
config = self.run_model_pickling(params)
check(config)
params = {"nthread": 8, "tree_method": "exact", "subsample": 0.5}
config = self.run_model_pickling(params)
check(config)
|
import json
import os
import pickle
import tempfile
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
kRows = 100
kCols = 10
def generate_data():
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
return X, y
class TestPickling:
def run_model_pickling(self, xgb_params) -> str:
X, y = generate_data()
dtrain = xgb.DMatrix(X, y)
bst = xgb.train(xgb_params, dtrain)
dump_0 = bst.get_dump(dump_format='json')
assert dump_0
config_0 = bst.save_config()
filename = 'model.pkl'
with open(filename, 'wb') as fd:
pickle.dump(bst, fd)
with open(filename, 'rb') as fd:
bst = pickle.load(fd)
with open(filename, 'wb') as fd:
pickle.dump(bst, fd)
with open(filename, 'rb') as fd:
bst = pickle.load(fd)
assert bst.get_dump(dump_format='json') == dump_0
if os.path.exists(filename):
os.remove(filename)
config_1 = bst.save_config()
assert config_0 == config_1
return json.loads(config_0)
def test_model_pickling_json(self):
def check(config):
tree_param = config["learner"]["gradient_booster"]["tree_train_param"]
subsample = tree_param["subsample"]
assert float(subsample) == 0.5
params = {"nthread": 8, "tree_method": "hist", "subsample": 0.5}
config = self.run_model_pickling(params)
check(config)
params = {"nthread": 8, "tree_method": "exact", "subsample": 0.5}
config = self.run_model_pickling(params)
check(config)
@pytest.mark.skipif(**tm.no_sklearn())
def test_with_sklearn_obj_metric(self) -> None:
from sklearn.metrics import mean_squared_error
X, y = tm.datasets.make_regression()
reg = xgb.XGBRegressor(objective=tm.ls_obj, eval_metric=mean_squared_error)
reg.fit(X, y)
pkl = pickle.dumps(reg)
reg_1 = pickle.loads(pkl)
assert callable(reg_1.objective)
assert callable(reg_1.eval_metric)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, "model.json")
reg.save_model(path)
reg_2 = xgb.XGBRegressor()
reg_2.load_model(path)
assert not callable(reg_2.objective)
assert not callable(reg_2.eval_metric)
assert reg_2.eval_metric is None
|
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
>>> softmax_layer = keras.layers.Softmax()
>>> input = np.array([1.0, 2.0, 1.0])
>>> result = softmax_layer(input)
>>> result
[0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self.built = True
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
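# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Demonstrates the `mask` call argument: masked-out positions receive a large
# negative logit before normalization, so they end up with (near-)zero probability.
# The concrete input values are arbitrary.
def _softmax_mask_demo():
    import numpy as np
    layer = Softmax(axis=-1)
    logits = np.array([[1.0, 2.0, 3.0]])
    mask = np.array([[True, True, False]])  # keep the first two positions only
    probs = layer(logits, mask=mask)
    # probs[0, 2] is ~0 and probs[0, :2] sums to ~1
    return probs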
|
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
    >>> softmax_layer = keras.layers.Softmax()
>>> input = np.array([1.0, 2.0, 1.0])
>>> result = softmax_layer(input)
>>> result
[0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self.built = True
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
"""Function calling agent."""
from typing import Any, List, Optional
from llama_index.core.agent.runner.base import AgentRunner, AgentState
from llama_index.core.agent.function_calling.step import (
FunctionCallingAgentWorker,
DEFAULT_MAX_FUNCTION_CALLS,
)
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.memory.types import BaseMemory
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.tools.types import BaseTool
class FunctionCallingAgent(AgentRunner):
"""
Function calling agent.
Light wrapper around AgentRunner.
"""
@classmethod
def from_tools(
cls,
tools: Optional[List[BaseTool]] = None,
tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
llm: Optional[FunctionCallingLLM] = None,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
chat_history: Optional[List[ChatMessage]] = None,
state: Optional[AgentState] = None,
allow_parallel_tool_calls: bool = True,
**kwargs: Any,
) -> "FunctionCallingAgent":
"""Create a FunctionCallingAgent from a list of tools."""
tools = tools or []
llm = llm or Settings.llm # type: ignore
assert isinstance(llm, FunctionCallingLLM), (
"llm must be an instance of FunctionCallingLLM"
)
if callback_manager is not None:
llm.callback_manager = callback_manager
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
agent_worker = FunctionCallingAgentWorker.from_tools(
tools,
tool_retriever=tool_retriever,
llm=llm,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
prefix_messages=prefix_messages,
allow_parallel_tool_calls=allow_parallel_tool_calls,
)
return cls(
agent_worker=agent_worker,
memory=memory,
chat_history=chat_history,
state=state,
llm=llm,
callback_manager=callback_manager,
verbose=verbose,
**kwargs,
)
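# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Building the agent from a single function tool. The tool, model name and prompt
# below are purely illustrative, and running this requires a configured OpenAI key,
# so it is left as a commented example.
#
#     from llama_index.core.tools import FunctionTool
#     from llama_index.llms.openai import OpenAI
#
#     def multiply(a: int, b: int) -> int:
#         """Multiply two integers."""
#         return a * b
#
#     agent = FunctionCallingAgent.from_tools(
#         tools=[FunctionTool.from_defaults(fn=multiply)],
#         llm=OpenAI(model="gpt-4o-mini"),
#         system_prompt="You are a helpful calculator.",
#         verbose=True,
#     )
#     response = agent.chat("What is 121 * 8?")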
|
"""Function calling agent."""
from typing import Any, List, Optional
from llama_index.core.agent.runner.base import AgentRunner, AgentState
from llama_index.core.agent.function_calling.step import (
FunctionCallingAgentWorker,
DEFAULT_MAX_FUNCTION_CALLS,
)
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.memory.types import BaseMemory
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.tools.types import BaseTool
class FunctionCallingAgent(AgentRunner):
"""
Function calling agent.
Light wrapper around AgentRunner.
"""
@classmethod
def from_tools(
cls,
tools: Optional[List[BaseTool]] = None,
tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
llm: Optional[FunctionCallingLLM] = None,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
chat_history: Optional[List[ChatMessage]] = None,
state: Optional[AgentState] = None,
allow_parallel_tool_calls: bool = True,
**kwargs: Any,
) -> "FunctionCallingAgent":
"""Create a FunctionCallingAgent from a list of tools."""
tools = tools or []
llm = llm or Settings.llm # type: ignore
assert isinstance(
llm, FunctionCallingLLM
), "llm must be an instance of FunctionCallingLLM"
if callback_manager is not None:
llm.callback_manager = callback_manager
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
agent_worker = FunctionCallingAgentWorker.from_tools(
tools,
tool_retriever=tool_retriever,
llm=llm,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
prefix_messages=prefix_messages,
allow_parallel_tool_calls=allow_parallel_tool_calls,
)
return cls(
agent_worker=agent_worker,
memory=memory,
chat_history=chat_history,
state=state,
llm=llm,
callback_manager=callback_manager,
verbose=verbose,
**kwargs,
)
|
import copy
import warnings
import torch
from keras.src import tree
from keras.src.export.export_utils import convert_spec_to_tensor
from keras.src.utils.module_utils import tensorflow as tf
from keras.src.utils.module_utils import torch_xla
class TorchExportArchive:
def _track_layer(self, layer):
raise NotImplementedError(
"`track` is not supported for `Layer`s and `Model`s in the torch "
"backend. Use `track_and_add_endpoint` instead."
)
def add_endpoint(self, name, fn, input_signature, **kwargs):
raise NotImplementedError(
"`add_endpoint` is not supported for `Layer`s and `Model`s in the "
"torch backend. Use `track_and_add_endpoint` instead."
)
def track_and_add_endpoint(self, name, resource, input_signature, **kwargs):
# Disable false alarms related to lifting parameters.
warnings.filterwarnings("ignore", message=".*created when tracing.*")
warnings.filterwarnings(
"ignore", message=".*Unable to find the path of the module.*"
)
if not isinstance(resource, torch.nn.Module):
raise TypeError(
"`resource` must be an instance of `torch.nn.Module`. "
f"Received: resource={resource} (of type {type(resource)})"
)
sample_inputs = tree.map_structure(
lambda x: convert_spec_to_tensor(x, replace_none_number=1),
input_signature,
)
sample_inputs = tuple(sample_inputs)
# Ref: torch_xla.tf_saved_model_integration
# TODO: Utilize `dynamic_shapes`
exported = torch.export.export(
resource, sample_inputs, dynamic_shapes=None, strict=False
)
options = torch_xla.stablehlo.StableHLOExportOptions(
override_tracing_arguments=sample_inputs
)
stablehlo_model = torch_xla.stablehlo.exported_program_to_stablehlo(
exported, options
)
state_dict_keys = list(stablehlo_model._bundle.state_dict.keys())
# Remove unused variables.
for k in state_dict_keys:
if "lifted" not in k:
stablehlo_model._bundle.state_dict.pop(k)
bundle = copy.deepcopy(stablehlo_model._bundle)
bundle.state_dict = {
k: tf.Variable(v, trainable=False, name=k)
for k, v in bundle.state_dict.items()
}
bundle.additional_constants = [
tf.Variable(v, trainable=False) for v in bundle.additional_constants
]
# Track variables in `bundle` for `write_out`.
self._tf_trackable.variables += (
list(bundle.state_dict.values()) + bundle.additional_constants
)
# Ref: torch_xla.tf_saved_model_integration.save_stablehlo_graph_as_tf
def make_tf_function(func, bundle):
from tensorflow.compiler.tf2xla.python import xla as tfxla
def _get_shape_with_dynamic(signature):
shape = copy.copy(signature.shape)
for i in signature.dynamic_dims:
shape[i] = None
return shape
def _extract_call_parameters(args, meta, bundle):
call_args = []
if meta.input_pytree_spec is not None:
args = tree.flatten(args)
for loc in meta.input_locations:
if loc.type_ == torch_xla.stablehlo.VariableType.PARAMETER:
call_args.append(bundle.state_dict[loc.name])
elif loc.type_ == torch_xla.stablehlo.VariableType.CONSTANT:
call_args.append(
bundle.additional_constants[loc.position]
)
else:
call_args.append(args[loc.position])
return call_args
def inner(*args):
Touts = [sig.dtype for sig in func.meta.output_signature]
Souts = [
_get_shape_with_dynamic(sig)
for sig in func.meta.output_signature
]
call_args = _extract_call_parameters(args, func.meta, bundle)
results = tfxla.call_module(
tuple(call_args),
version=5,
Tout=Touts, # dtype information
Sout=Souts, # Shape information
function_list=[],
module=func.bytecode,
)
if len(Souts) == 1:
results = results[0]
return results
return inner
decorated_fn = tf.function(
make_tf_function(
stablehlo_model._bundle.stablehlo_funcs[0], bundle
),
input_signature=input_signature,
)
return decorated_fn
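# --- Standalone sketch of the `torch.export.export` step used above (illustrative) ---
# `TinyNet` and the sample shape are made up; everything else is plain PyTorch API.
# The printed ExportedProgram is the captured graph that the StableHLO conversion
# above consumes.
class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)


sample_args = (torch.zeros(1, 4),)
exported_program = torch.export.export(TinyNet(), sample_args, strict=False)
print(exported_program)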
|
import copy
import warnings
import torch
from keras.src import tree
from keras.src.export.export_utils import convert_spec_to_tensor
from keras.src.utils.module_utils import tensorflow as tf
from keras.src.utils.module_utils import torch_xla
class TorchExportArchive:
def track(self, resource):
raise NotImplementedError(
"`track` is not implemented in the torch backend. Use"
"`track_and_add_endpoint` instead."
)
def add_endpoint(self, name, fn, input_signature, **kwargs):
raise NotImplementedError(
"`add_endpoint` is not implemented in the torch backend. Use"
"`track_and_add_endpoint` instead."
)
def track_and_add_endpoint(self, name, resource, input_signature, **kwargs):
# Disable false alarms related to lifting parameters.
warnings.filterwarnings("ignore", message=".*created when tracing.*")
warnings.filterwarnings(
"ignore", message=".*Unable to find the path of the module.*"
)
if not isinstance(resource, torch.nn.Module):
raise TypeError(
"`resource` must be an instance of `torch.nn.Module`. "
f"Received: resource={resource} (of type {type(resource)})"
)
sample_inputs = tree.map_structure(
lambda x: convert_spec_to_tensor(x, replace_none_number=1),
input_signature,
)
sample_inputs = tuple(sample_inputs)
# Ref: torch_xla.tf_saved_model_integration
# TODO: Utilize `dynamic_shapes`
exported = torch.export.export(
resource, sample_inputs, dynamic_shapes=None, strict=False
)
options = torch_xla.stablehlo.StableHLOExportOptions(
override_tracing_arguments=sample_inputs
)
stablehlo_model = torch_xla.stablehlo.exported_program_to_stablehlo(
exported, options
)
state_dict_keys = list(stablehlo_model._bundle.state_dict.keys())
# Remove unused variables.
for k in state_dict_keys:
if "lifted" not in k:
stablehlo_model._bundle.state_dict.pop(k)
bundle = copy.deepcopy(stablehlo_model._bundle)
bundle.state_dict = {
k: tf.Variable(v, trainable=False, name=k)
for k, v in bundle.state_dict.items()
}
bundle.additional_constants = [
tf.Variable(v, trainable=False) for v in bundle.additional_constants
]
# Track variables in `bundle` for `write_out`.
self._tf_trackable.variables += (
list(bundle.state_dict.values()) + bundle.additional_constants
)
# Ref: torch_xla.tf_saved_model_integration.save_stablehlo_graph_as_tf
def make_tf_function(func, bundle):
from tensorflow.compiler.tf2xla.python import xla as tfxla
def _get_shape_with_dynamic(signature):
shape = copy.copy(signature.shape)
for i in signature.dynamic_dims:
shape[i] = None
return shape
def _extract_call_parameters(args, meta, bundle):
call_args = []
if meta.input_pytree_spec is not None:
args = tree.flatten(args)
for loc in meta.input_locations:
if loc.type_ == torch_xla.stablehlo.VariableType.PARAMETER:
call_args.append(bundle.state_dict[loc.name])
elif loc.type_ == torch_xla.stablehlo.VariableType.CONSTANT:
call_args.append(
bundle.additional_constants[loc.position]
)
else:
call_args.append(args[loc.position])
return call_args
def inner(*args):
Touts = [sig.dtype for sig in func.meta.output_signature]
Souts = [
_get_shape_with_dynamic(sig)
for sig in func.meta.output_signature
]
call_args = _extract_call_parameters(args, func.meta, bundle)
results = tfxla.call_module(
tuple(call_args),
version=5,
Tout=Touts, # dtype information
Sout=Souts, # Shape information
function_list=[],
module=func.bytecode,
)
if len(Souts) == 1:
results = results[0]
return results
return inner
decorated_fn = tf.function(
make_tf_function(
stablehlo_model._bundle.stablehlo_funcs[0], bundle
),
input_signature=input_signature,
)
return decorated_fn
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=128),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=128),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = ["IPAdapterMixin"]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import IPAdapterMixin
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
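# --- Illustration (kept as a comment because this file is the module being imported) ---
# With `_LazyModule` installed above, the names listed in `_import_structure` are only
# imported on first attribute access. A hedged usage sketch from user code:
#
#     import diffusers.loaders as loaders
#     mixin = loaders.IPAdapterMixin  # triggers the real import of `.ip_adapter` lazily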
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = ["IPAdapterMixin"]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import IPAdapterMixin
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
LoraLoaderMixin,
Mochi1LoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
They are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# `decode` returns, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
|
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
They are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)["decoded_1"]
print(f"\nTop tokens {top_k} for each text:")
# `decode` returns, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
|
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
class TimmImageEncoder(Executor):
"""
TimmImageEncoder encodes Document blobs of type `ndarray` (`float32`) and shape
    `H x W x 3` into a `d`-dimensional embedding. The input image in Document should be
in RGB format.
If `use_default_preprocessing=False`, the expected input shape is
`3 x H x W` with `float32` dtype.
Internally, :class:`TimmImageEncoder` wraps the pre-trained models from
[Timm library](https://rwightman.github.io/pytorch-image-models/).
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
traversal_paths: Tuple[str] = ('r',),
batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs
):
"""
:param model_name: the name of the model. Models listed on:
https://rwightman.github.io/pytorch-image-models/models/
:param device: Which device the model runs on. For example 'cpu' or 'cuda'.
:param traversal_paths: Defines traversal path through the docs. It can be
overridden via request params.
:param batch_size: Defines the batch size for inference on the loaded Timm model.
It can be overridden via request params.
:param use_default_preprocessing: If the input should be preprocessed with
default configuration. If `False`, inputs are expected to be pre-processed.
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
super().__init__(*args, **kwargs)
self.device = device
self.batch_size = batch_size
self.use_default_preprocessing = use_default_preprocessing
self.traversal_paths = traversal_paths
self._model = create_model(model_name, pretrained=True, num_classes=0)
self._model = self._model.to(device).eval()
config = resolve_data_config({}, model=self._model)
self._preprocess = create_transform(**config)
self._preprocess.transforms.insert(0, T.ToPILImage())
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
        Encode image data from the `blob` attribute of Documents into a `d`-dimensional
        ndarray and fill in the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary with additional request parameters. Possible
values are `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional keyword arguments.
"""
if docs is None:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
docs_batch_generator = docs.batch(
traversal_paths=traversal_paths,
batch_size=batch_size,
require_attr='blob',
)
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack([self._preprocess(img) for img in blob_batch])
else:
images = np.stack(blob_batch)
with torch.inference_mode():
tensor = torch.from_numpy(images).to(self.device)
features = self._model(tensor)
features = features.cpu().numpy()
for doc, embed in zip(document_batch, features):
doc.embedding = embed
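# --- Usage sketch (illustrative; assumes a jina/DocumentArray version matching the
# imports above and network access to download the pretrained resnet18 weights) ---
from jina import Document

example_docs = DocumentArray(
    [Document(blob=np.random.rand(224, 224, 3).astype('float32')) for _ in range(2)]
)
encoder = TimmImageEncoder(model_name='resnet18')
encoder.encode(docs=example_docs, parameters={})
print(example_docs[0].embedding.shape)  # e.g. (512,) for resnet18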
|
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
class TimmImageEncoder(Executor):
"""
TimmImageEncoder encodes Document blobs of type `ndarray` (`float32`) and shape
    `H x W x 3` into a `d`-dimensional embedding. The input image in Document should be
in RGB format.
If `use_default_preprocessing=False`, the expected input shape is
`3 x H x W` with `float32` dtype.
Internally, :class:`TimmImageEncoder` wraps the pre-trained models from
[Timm library](https://rwightman.github.io/pytorch-image-models/).
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
traversal_path: Tuple[str] = ('r',),
batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs
):
"""
:param model_name: the name of the model. Models listed on:
https://rwightman.github.io/pytorch-image-models/models/
:param device: Which device the model runs on. For example 'cpu' or 'cuda'.
:param traversal_paths: Defines traversal path through the docs. It can be
overridden via request params.
:param batch_size: Defines the batch size for inference on the loaded Timm model.
It can be overridden via request params.
:param use_default_preprocessing: If the input should be preprocessed with
default configuration. If `False`, inputs are expected to be pre-processed.
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
super().__init__(*args, **kwargs)
self.device = device
self.batch_size = batch_size
self.use_default_preprocessing = use_default_preprocessing
self.traversal_path = traversal_path
self._model = create_model(model_name, pretrained=True, num_classes=0)
self._model = self._model.to(device)
self._model.eval()
config = resolve_data_config({}, model=self._model)
self._preprocess = create_transform(**config)
self._preprocess.transforms.insert(0, T.ToPILImage())
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
        Encode image data from the `blob` attribute of Documents into a `d`-dimensional
        ndarray and fill in the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary with additional request parameters. Possible
values are `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional keyword arguments.
"""
if docs is None:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_path)
batch_size = parameters.get('batch_size', self.batch_size)
docs_batch_generator = docs.batch(
traversal_paths=traversal_paths,
batch_size=batch_size,
require_attr='blob',
)
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack([self._preprocess(img) for img in blob_batch])
else:
images = np.stack(blob_batch)
with torch.inference_mode():
tensor = torch.from_numpy(images).to(self.device)
features = self._model(tensor)
features = features.cpu().numpy()
for doc, embed in zip(document_batch, features):
doc.embedding = embed
|
from typing import Iterable, Type
from docarray.document import AnyDocument, BaseDocument, BaseNode
from docarray.document.abstract_document import AbstractDocument
from .abstract_array import AbstractDocumentArray
from .mixins import ProtoArrayMixin
class DocumentArray(
list,
ProtoArrayMixin,
AbstractDocumentArray,
BaseNode,
):
"""
    A DocumentArray is a list-like container of Documents that share the same schema.
:param docs: iterable of Document
"""
document_type: Type[BaseDocument] = AnyDocument
def __init__(self, docs: Iterable[AbstractDocument]):
super().__init__(doc_ for doc_ in docs)
def __class_getitem__(cls, item: Type[BaseDocument]):
if not issubclass(item, BaseDocument):
raise ValueError(
f'DocumentArray[item] item should be a Document not a {item} '
)
class _DocumenArrayTyped(DocumentArray):
document_type = item
_DocumenArrayTyped.__name__ = f'DocumentArray{item.__name__}'
return _DocumenArrayTyped
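# --- Usage sketch (illustrative; `MyDoc` is a made-up schema, the API follows the
# early docarray v2 imports used above) ---
class MyDoc(BaseDocument):
    text: str


docs = DocumentArray[MyDoc]([MyDoc(text='hello'), MyDoc(text='world')])
print(type(docs).__name__)          # DocumentArrayMyDoc
print(docs.document_type is MyDoc)  # True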
|
from typing import Iterable, Type
from docarray.document import AnyDocument, BaseDocument
from docarray.document.abstract_document import AbstractDocument
from docarray.typing import BaseNode
from .abstract_array import AbstractDocumentArray
from .mixins import ProtoArrayMixin
class DocumentArray(
list,
ProtoArrayMixin,
AbstractDocumentArray,
BaseNode,
):
"""
    A DocumentArray is a list-like container of Documents that share the same schema.
:param docs: iterable of Document
"""
document_type: Type[BaseDocument] = AnyDocument
def __init__(self, docs: Iterable[AbstractDocument]):
super().__init__(doc_ for doc_ in docs)
def __class_getitem__(cls, item: Type[BaseDocument]):
if not issubclass(item, BaseDocument):
raise ValueError(
f'DocumentArray[item] item should be a Document not a {item} '
)
class _DocumenArrayTyped(DocumentArray):
document_type = item
_DocumenArrayTyped.__name__ = f'DocumentArray{item.__name__}'
return _DocumenArrayTyped
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
"SanaLoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = ["IPAdapterMixin"]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import IPAdapterMixin
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SanaLoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = ["IPAdapterMixin"]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import IPAdapterMixin
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
from torch import nn, Tensor
__all__ = [
"Wav2Letter",
]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech
Recognition System* :cite:`collobert2016wav2letter`.
See Also:
* `Training example <https://github.com/pytorch/audio/tree/release/0.12/examples/pipeline_wav2letter>`__
Args:
num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
or ``mfcc`` (Default: ``waveform``).
num_features (int, optional): Number of input features that the network will receive (Default: ``1``).
"""
def __init__(self, num_classes: int = 40, input_type: str = "waveform", num_features: int = 1) -> None:
super().__init__()
acoustic_num_features = 250 if input_type == "waveform" else num_features
acoustic_model = nn.Sequential(
nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
)
if input_type == "waveform":
waveform_model = nn.Sequential(
nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
nn.ReLU(inplace=True),
)
self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
if input_type in ["power_spectrum", "mfcc"]:
self.acoustic_model = acoustic_model
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
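# --- Quick shape check (illustrative; the 1-second 16 kHz waveform below is made up) ---
import torch

model = Wav2Letter(num_classes=40, input_type="waveform", num_features=1)
waveform = torch.randn(1, 1, 16000)  # (batch, num_features, input_length)
out = model(waveform)
print(out.shape)  # (1, 40, T): log-probabilities over the character classes per frame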
|
from torch import nn, Tensor
__all__ = [
"Wav2Letter",
]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech
Recognition System* :cite:`collobert2016wav2letter`.
:math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}`
Args:
num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
or ``mfcc`` (Default: ``waveform``).
num_features (int, optional): Number of input features that the network will receive (Default: ``1``).
"""
def __init__(self, num_classes: int = 40, input_type: str = "waveform", num_features: int = 1) -> None:
super(Wav2Letter, self).__init__()
acoustic_num_features = 250 if input_type == "waveform" else num_features
acoustic_model = nn.Sequential(
nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
)
if input_type == "waveform":
waveform_model = nn.Sequential(
nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
nn.ReLU(inplace=True),
)
self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
if input_type in ["power_spectrum", "mfcc"]:
self.acoustic_model = acoustic_model
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
|
from keras.src.backend.common.tensor_attributes import get_tensor_attr
from keras.src.backend.common.tensor_attributes import set_tensor_attr
def set_keras_mask(x, mask):
"""Sets the Keras mask attribute for the given tensor in-place.
Args:
x: Input tensor.
mask: The mask tensor to be set. If `None`, the `_keras_mask` attribute
will be cleared.
"""
set_tensor_attr(x, "_keras_mask", mask)
def get_keras_mask(x):
"""Gets the Keras mask attribute from the given tensor.
Args:
x: Input tensor.
Returns:
The mask tensor associated with the input tensor, or `None` if no mask
has been set.
"""
return get_tensor_attr(x, "_keras_mask")
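# --- Minimal sketch (illustration) ---
# Attaching and reading a mask on a torch tensor, which accepts ad-hoc attributes,
# so the tensor-attribute helpers can fall back to plain setattr here.
import torch

x = torch.ones(2, 3)
set_keras_mask(x, torch.tensor([True, False]))
print(get_keras_mask(x))  # tensor([ True, False])
set_keras_mask(x, None)   # clears the mask again
print(get_keras_mask(x))  # None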
|
from keras.src.backend.common.tensor_attributes import get_tensor_attr
from keras.src.backend.common.tensor_attributes import set_tensor_attr
def set_keras_mask(x, mask):
return set_tensor_attr(x, "_keras_mask", mask)
def get_keras_mask(x):
return get_tensor_attr(x, "_keras_mask")
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purpose, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_EXT",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_EXT = None
if _IS_TORCHAUDIO_EXT_AVAILABLE:
try:
_FFMPEG_EXT = _init_ffmpeg()
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
fail_if_no_ffmpeg = _fail_since_no_ffmpeg if _FFMPEG_EXT is None else no_op
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
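# --- Illustration (hypothetical function): how these guards are applied downstream ---
# `fail_if_no_sox` is either a no-op or a decorator that raises at call time, so
# library code can unconditionally decorate sox-backed entry points:
@fail_if_no_sox
def _apply_sox_effects(waveform, effects):
    ...  # would dispatch into torchaudio.lib._torchaudio_sox here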
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purpose, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_INITIALIZED",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_ffmpeg"):
try:
_init_ffmpeg()
_FFMPEG_INITIALIZED = True
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
fail_if_no_ffmpeg = no_op if _FFMPEG_INITIALIZED else _fail_since_no_ffmpeg
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent an image tensor.
Adds image-specific features to the tensor.
    For instance the ability to convert the tensor back to image bytes which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance the ability to convert the tensor back to image bytes which are
    optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
import asyncio
import time
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
@pytest.mark.parametrize('reuse_session', [False, True])
async def test_streaming_deployment(protocol, include_gateway, reuse_session):
if reuse_session and protocol != 'http':
return
port = random_port()
docs = []
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True, reuse_session=reuse_session)
i = 0
async for doc in client.stream_doc(
on='/hello',
inputs=Document(text='hello world'),
return_type=Document,
input_type=Document,
):
docs.append(doc.text)
i += 1
assert docs == [f'hello world {i}' for i in range(100)]
assert len(docs) == 100
class WaitStreamExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(5):
yield Document(text=f'{doc.text} {i}')
await asyncio.sleep(0.5)
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
@pytest.mark.parametrize('reuse_session', [False, True])
async def test_streaming_delay(protocol, include_gateway, reuse_session):
if reuse_session and protocol != 'http':
return
from jina import Deployment
port = random_port()
with Deployment(
uses=WaitStreamExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True, reuse_session=reuse_session)
i = 0
start_time = time.time()
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
# 0.5 seconds between each request + 0.5 seconds tolerance interval
assert time.time() - start_time < (0.5 * i) + 0.5
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
|
import asyncio
import time
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_deployment(protocol, include_gateway):
port = random_port()
docs = []
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
async for doc in client.stream_doc(
on='/hello',
inputs=Document(text='hello world'),
return_type=Document,
input_type=Document,
):
docs.append(doc.text)
i += 1
assert docs == [f'hello world {i}' for i in range(100)]
assert len(docs) == 100
class WaitStreamExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(5):
yield Document(text=f'{doc.text} {i}')
await asyncio.sleep(0.5)
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_delay(protocol, include_gateway):
from jina import Deployment
port = random_port()
with Deployment(
uses=WaitStreamExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
start_time = time.time()
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
# 0.5 seconds between each request + 0.5 seconds tolerance interval
assert time.time() - start_time < (0.5 * i) + 0.5
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
|
import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray
from docarray.typing.tensor import NdArrayEmbedding
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 224)))
def test_np_embedding():
# correct shape
tensor = parse_obj_as(NdArrayEmbedding[128], np.zeros((128,)))
assert isinstance(tensor, NdArrayEmbedding)
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128], np.zeros((256,)))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128, 128], np.zeros((128, 128)))
|
import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import NdArray
from docarray.typing.tensor import NdArrayEmbedding
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 224)))
def test_np_embedding():
# correct shape
tensor = parse_obj_as(NdArrayEmbedding[128], np.zeros((128,)))
assert isinstance(tensor, NdArrayEmbedding)
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128], np.zeros((256,)))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128, 128], np.zeros((128, 128)))
|
# mypy: allow-untyped-defs
# Owner(s): ["oncall: distributed"]
import os
import shutil
import traceback
from concurrent.futures import Future
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor.device_mesh import init_device_mesh
DEVICE = "cuda"
NUM_EPOCHS = 1000
SAVE_PERIOD = 10
FAULT_PERIOD = 25
CHECKPOINT_DIR = f"~/{os.environ.get('LOGNAME', '')}/checkpoint"
class InjectedException(Exception):
pass
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.net1 = nn.Linear(8, 32)
self.net2 = nn.Linear(32, 128)
self.net3 = nn.Linear(128, 64)
self.net4 = nn.Linear(64, 8)
self.net5 = nn.Linear(8, 1)
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
x = F.relu(self.net4(x))
x = F.sigmoid(self.net5(x))
return x
def _init_model(rank, world_size):
device_mesh = init_device_mesh(DEVICE, (world_size,))
# Create a dummy model and wrap it in FSDP
model = Model().cuda()
device_mesh = init_device_mesh(DEVICE, (world_size,))
model = FSDP(model, device_mesh=device_mesh, use_orig_params=True)
optim = torch.optim.Adam(model.parameters(), lr=0.0001)
_patch_model_state_dict(model)
_patch_optimizer_state_dict(model, optimizers=optim)
return model, optim
def _print(msg):
if dist.get_rank() == 0:
print(msg)
def _input():
x = torch.rand(128, 8, device="cuda")
y = torch.zeros(128, 1, device="cuda")
y[torch.sum(x, dim=1) >= 4] = 1.0
return x, y
def run(rank, world_size):
# Set up world pg
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
dist.init_process_group("cpu:gloo,cuda:nccl", rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
model, optim = _init_model(rank, world_size)
state_dict = {"model": model, "optim": optim}
loss_calc = torch.nn.BCELoss()
f = None
for epoch in range(NUM_EPOCHS):
try:
torch.manual_seed(epoch)
x, y = _input()
loss = loss_calc(model(x), y)
_print(f"{epoch=} {loss=}")
loss.backward()
optim.step()
optim.zero_grad()
if epoch % SAVE_PERIOD == 0:
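                # Wait for the previous asynchronous checkpoint (if any) to finish
                # before starting a new one.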
if f is not None:
assert isinstance(f, Future)
f.result()
f = dcp.state_dict_saver.async_save(
state_dict, checkpoint_id=CHECKPOINT_DIR
)
if FAULT_PERIOD > 0 and epoch % FAULT_PERIOD == 0:
raise InjectedException("Fault injection!")
except InjectedException as e:
dist.barrier()
_print("Trainer encountered exception:")
traceback.print_tb(e.__traceback__)
_print("Reloading model from last checkpoint!")
if f is not None:
assert isinstance(f, Future)
f.result()
dcp.load(state_dict)
if __name__ == "__main__":
world_size = torch.cuda.device_count()
print(f"Running an example of Async Checkpointing on {world_size} devices.")
shutil.rmtree(CHECKPOINT_DIR, ignore_errors=True)
mp.spawn(
run,
args=(world_size,),
nprocs=world_size,
join=True,
)
|
# mypy: allow-untyped-defs
# Owner(s): ["oncall: distributed"]
import os
import shutil
import traceback
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor.device_mesh import init_device_mesh
DEVICE = "cuda"
NUM_EPOCHS = 1000
SAVE_PERIOD = 10
FAULT_PERIOD = 25
CHECKPOINT_DIR = f"~/{os.environ.get('LOGNAME', '')}/checkpoint"
class InjectedException(Exception):
pass
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.net1 = nn.Linear(8, 32)
self.net2 = nn.Linear(32, 128)
self.net3 = nn.Linear(128, 64)
self.net4 = nn.Linear(64, 8)
self.net5 = nn.Linear(8, 1)
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
x = F.relu(self.net4(x))
x = F.sigmoid(self.net5(x))
return x
def _init_model(rank, world_size):
device_mesh = init_device_mesh(DEVICE, (world_size,))
# Create a dummy model and wrap it in FSDP
model = Model().cuda()
device_mesh = init_device_mesh(DEVICE, (world_size,))
model = FSDP(model, device_mesh=device_mesh, use_orig_params=True)
optim = torch.optim.Adam(model.parameters(), lr=0.0001)
_patch_model_state_dict(model)
_patch_optimizer_state_dict(model, optimizers=optim)
return model, optim
def _print(msg):
if dist.get_rank() == 0:
print(msg)
def _input():
x = torch.rand(128, 8, device="cuda")
y = torch.zeros(128, 1, device="cuda")
y[torch.sum(x, dim=1) >= 4] = 1.0
return x, y
def run(rank, world_size):
# Set up world pg
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
dist.init_process_group("cpu:gloo,cuda:nccl", rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
model, optim = _init_model(rank, world_size)
state_dict = {"model": model, "optim": optim}
loss_calc = torch.nn.BCELoss()
f = None
for epoch in range(NUM_EPOCHS):
try:
torch.manual_seed(epoch)
x, y = _input()
loss = loss_calc(model(x), y)
_print(f"{epoch=} {loss=}")
loss.backward()
optim.step()
optim.zero_grad()
if epoch % SAVE_PERIOD == 0:
if f is not None:
f.result()
f = dcp.state_dict_saver.async_save(
state_dict, checkpoint_id=CHECKPOINT_DIR
)
if FAULT_PERIOD > 0 and epoch % FAULT_PERIOD == 0:
raise InjectedException("Fault injection!")
except InjectedException as e:
dist.barrier()
_print("Trainer encountered exception:")
traceback.print_tb(e.__traceback__)
_print("Reloading model from last checkpoint!")
if f is not None:
f.result()
dcp.load(state_dict)
if __name__ == "__main__":
world_size = torch.cuda.device_count()
print(f"Running an example of Async Checkpointing on {world_size} devices.")
shutil.rmtree(CHECKPOINT_DIR, ignore_errors=True)
mp.spawn(
run,
args=(world_size,),
nprocs=world_size,
join=True,
)
|
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDoc, DocArray, DocArrayStacked
from docarray.typing import TorchTensor
from docarray.utils._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDoc)
class MultiModalDataset(Dataset, Generic[T_doc]):
"""
A dataset that can be used inside a PyTorch DataLoader.
In other words, it implements the PyTorch Dataset interface.
:param da: the DocArray to be used as the dataset
:param preprocessing: a dictionary of field names and preprocessing functions
The preprocessing dictionary passed to the constructor consists of keys that are
field names and values that are functions that take a single argument and return
    a single value.
EXAMPLE USAGE
.. code-block:: python
from torch.utils.data import DataLoader
from docarray import DocArray
from docarray.data import MultiModalDataset
from docarray.documents import Text
def prepend_number(text: str):
return f"Number {text}"
da = DocArray[Text](Text(text=str(i)) for i in range(16))
ds = MultiModalDataset[Text](da, preprocessing={'text': prepend_number})
loader = DataLoader(ds, batch_size=4, collate_fn=MultiModalDataset[Text].collate_fn)
for batch in loader:
print(batch.text)
Nested fields can be accessed by using dot notation.
The document itself can be accessed using the empty string as the key.
    Transformations that operate on reference types (such as Documents) may modify them
    in place and do not have to return a value.
The transformations will be applied according to their order in the dictionary.
EXAMPLE USAGE
.. code-block:: python
import torch
from torch.utils.data import DataLoader
from docarray import DocArray, BaseDoc
from docarray.data import MultiModalDataset
from docarray.documents import Text
class Thesis(BaseDoc):
title: Text
class Student(BaseDoc):
thesis: Thesis
def embed_title(title: Text):
title.embedding = torch.ones(4)
def normalize_embedding(thesis: Thesis):
thesis.title.embedding = thesis.title.embedding / thesis.title.embedding.norm()
def add_nonsense(student: Student):
student.thesis.title.embedding = student.thesis.title.embedding + int(
student.thesis.title.text
)
da = DocArray[Student](Student(thesis=Thesis(title=str(i))) for i in range(16))
ds = MultiModalDataset[Student](
da,
preprocessing={
"thesis.title": embed_title,
"thesis": normalize_embedding,
"": add_nonsense,
},
)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
for batch in loader:
print(batch.thesis.title.embedding)
"""
document_type: Optional[Type[BaseDoc]] = None
__typed_ds__: Dict[Type[BaseDoc], Type['MultiModalDataset']] = {}
def __init__(
self, da: 'DocArray[T_doc]', preprocessing: Dict[str, Callable]
) -> None:
self.da = da
self._preprocessing = preprocessing
def __len__(self):
return len(self.da)
def __getitem__(self, item: int):
doc = self.da[item].copy(deep=True)
for field, preprocess in self._preprocessing.items():
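            # An empty key applies the function to the whole document; otherwise the dotted
            # path is resolved attribute by attribute down to the field to preprocess.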
if len(field) == 0:
doc = preprocess(doc) or doc
else:
acc_path = field.split('.')
_field_ref = doc
for attr in acc_path[:-1]:
_field_ref = getattr(_field_ref, attr)
attr = acc_path[-1]
value = getattr(_field_ref, attr)
setattr(_field_ref, attr, preprocess(value) or value)
return doc
@classmethod
def collate_fn(cls, batch: List[T_doc]):
doc_type = cls.document_type
if doc_type:
batch_da = DocArrayStacked[doc_type]( # type: ignore
batch,
tensor_type=TorchTensor,
)
else:
batch_da = DocArrayStacked(batch, tensor_type=TorchTensor)
return batch_da
@classmethod
def __class_getitem__(cls, item: Type[BaseDoc]) -> Type['MultiModalDataset']:
if not issubclass(item, BaseDoc):
raise ValueError(
                f'{cls.__name__}[item] item should be a Document, not {item}'
)
if item not in cls.__typed_ds__:
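            # The generated class is promoted to a module-level name (via `global` and
            # `change_cls_name`), presumably so the parametrized dataset stays picklable
            # (e.g. for DataLoader workers).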
global _TypedDataset
class _TypedDataset(cls): # type: ignore
document_type = item
change_cls_name(
_TypedDataset, f'{cls.__name__}[{item.__name__}]', globals()
)
cls.__typed_ds__[item] = _TypedDataset
return cls.__typed_ds__[item]
|
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDocument, DocumentArray, DocumentArrayStacked
from docarray.typing import TorchTensor
from docarray.utils._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDocument)
class MultiModalDataset(Dataset, Generic[T_doc]):
"""
A dataset that can be used inside a PyTorch DataLoader.
In other words, it implements the PyTorch Dataset interface.
:param da: the DocumentArray to be used as the dataset
:param preprocessing: a dictionary of field names and preprocessing functions
The preprocessing dictionary passed to the constructor consists of keys that are
field names and values that are functions that take a single argument and return
    a single value.
EXAMPLE USAGE
.. code-block:: python
from torch.utils.data import DataLoader
from docarray import DocumentArray
from docarray.data import MultiModalDataset
from docarray.documents import Text
def prepend_number(text: str):
return f"Number {text}"
da = DocumentArray[Text](Text(text=str(i)) for i in range(16))
ds = MultiModalDataset[Text](da, preprocessing={'text': prepend_number})
loader = DataLoader(ds, batch_size=4, collate_fn=MultiModalDataset[Text].collate_fn)
for batch in loader:
print(batch.text)
Nested fields can be accessed by using dot notation.
The document itself can be accessed using the empty string as the key.
    Transformations that operate on reference types (such as Documents) may modify them
    in place and do not have to return a value.
The transformations will be applied according to their order in the dictionary.
EXAMPLE USAGE
.. code-block:: python
import torch
from torch.utils.data import DataLoader
from docarray import DocumentArray, BaseDocument
from docarray.data import MultiModalDataset
from docarray.documents import Text
class Thesis(BaseDocument):
title: Text
class Student(BaseDocument):
thesis: Thesis
def embed_title(title: Text):
title.embedding = torch.ones(4)
def normalize_embedding(thesis: Thesis):
thesis.title.embedding = thesis.title.embedding / thesis.title.embedding.norm()
def add_nonsense(student: Student):
student.thesis.title.embedding = student.thesis.title.embedding + int(
student.thesis.title.text
)
da = DocumentArray[Student](Student(thesis=Thesis(title=str(i))) for i in range(16))
ds = MultiModalDataset[Student](
da,
preprocessing={
"thesis.title": embed_title,
"thesis": normalize_embedding,
"": add_nonsense,
},
)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
for batch in loader:
print(batch.thesis.title.embedding)
"""
document_type: Optional[Type[BaseDocument]] = None
__typed_ds__: Dict[Type[BaseDocument], Type['MultiModalDataset']] = {}
def __init__(
self, da: 'DocumentArray[T_doc]', preprocessing: Dict[str, Callable]
) -> None:
self.da = da
self._preprocessing = preprocessing
def __len__(self):
return len(self.da)
def __getitem__(self, item: int):
doc = self.da[item].copy(deep=True)
for field, preprocess in self._preprocessing.items():
if len(field) == 0:
doc = preprocess(doc) or doc
else:
acc_path = field.split('.')
_field_ref = doc
for attr in acc_path[:-1]:
_field_ref = getattr(_field_ref, attr)
attr = acc_path[-1]
value = getattr(_field_ref, attr)
setattr(_field_ref, attr, preprocess(value) or value)
return doc
@classmethod
def collate_fn(cls, batch: List[T_doc]):
doc_type = cls.document_type
if doc_type:
batch_da = DocumentArrayStacked[doc_type]( # type: ignore
batch,
tensor_type=TorchTensor,
)
else:
batch_da = DocumentArrayStacked(batch, tensor_type=TorchTensor)
return batch_da
@classmethod
def __class_getitem__(cls, item: Type[BaseDocument]) -> Type['MultiModalDataset']:
if not issubclass(item, BaseDocument):
raise ValueError(
                f'{cls.__name__}[item] item should be a Document, not {item}'
)
if item not in cls.__typed_ds__:
global _TypedDataset
class _TypedDataset(cls): # type: ignore
document_type = item
change_cls_name(
_TypedDataset, f'{cls.__name__}[{item.__name__}]', globals()
)
cls.__typed_ds__[item] = _TypedDataset
return cls.__typed_ds__[item]
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
try:
from .fb import _init_ffmpeg
except ImportError:
from .utils import _init_ffmpeg
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_EXT",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will be changed in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_EXT = None
if _IS_TORCHAUDIO_EXT_AVAILABLE:
try:
_FFMPEG_EXT = _init_ffmpeg()
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
        # Since the rest of torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
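# The `fail_if_no_*` objects below are intended to be used as decorators on functions
# that require the corresponding extension: they are `no_op` when the extension loaded,
# and otherwise replace the function with one that raises an informative error on call.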
fail_if_no_ffmpeg = _fail_since_no_ffmpeg if _FFMPEG_EXT is None else no_op
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_EXT",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will be changed in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_EXT = None
if _IS_TORCHAUDIO_EXT_AVAILABLE:
try:
_FFMPEG_EXT = _init_ffmpeg()
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
fail_if_no_ffmpeg = _fail_since_no_ffmpeg if _FFMPEG_EXT is None else no_op
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
|
import asyncio
from typing import TYPE_CHECKING, Optional, Tuple
import grpc
from jina.clients.base.retry import wait_or_raise_err
from jina.clients.helper import callback_exec
from jina.excepts import InternalNetworkError
from jina.proto import jina_pb2_grpc
from jina.serve.stream import RequestStreamer
if TYPE_CHECKING:
from jina.types.request import Request
class UnaryRpc:
"""Class that encapsulated the methods required to run unary rpc calls from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
continue_on_error,
metadata,
on_always,
on_done,
on_error,
p_bar,
req_iter,
max_attempts,
backoff_multiplier,
initial_backoff,
max_backoff,
logger,
show_progress,
compression,
client_args,
prefetch,
results_in_order,
**kwargs
):
self.results_in_order = results_in_order
self.prefetch = prefetch
self.client_args = client_args
self.compression = compression
self.show_progress = show_progress
self.logger = logger
self.max_backoff = max_backoff
self.initial_backoff = initial_backoff
self.backoff_multiplier = backoff_multiplier
self.max_attempts = max_attempts
self.req_iter = req_iter
self.p_bar = p_bar
self.on_error = on_error
self.on_done = on_done
self.on_always = on_always
self.metadata = metadata
self.continue_on_error = continue_on_error
self.channel = channel
self.kwargs = kwargs
async def unary_rpc_with_retry(self):
"""Wraps the unary rpc call with retry loop based on the retry params.
:yields: Responses received from the target.
"""
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(self.channel)
def _request_handler(
request: 'Request', **kwargs
) -> 'Tuple[asyncio.Future, Optional[asyncio.Future]]':
async def _with_retry(req: 'Request'):
for attempt in range(1, self.max_attempts + 1):
try:
return await stub.process_single_data(
req,
compression=self.compression,
metadata=self.metadata,
credentials=self.kwargs.get('credentials', None),
timeout=self.kwargs.get('timeout', None),
)
except (
grpc.aio.AioRpcError,
InternalNetworkError,
) as err:
await wait_or_raise_err(
attempt=attempt,
err=err,
max_attempts=self.max_attempts,
backoff_multiplier=self.backoff_multiplier,
initial_backoff=self.initial_backoff,
max_backoff=self.max_backoff,
)
return (
asyncio.ensure_future(_with_retry(request)),
None,
)
def _result_handler(resp):
callback_exec(
response=resp,
logger=self.logger,
on_error=self.on_error,
on_done=self.on_done,
on_always=self.on_always,
continue_on_error=self.continue_on_error,
)
return resp
streamer_args = vars(self.client_args)
if self.prefetch:
streamer_args['prefetch'] = self.prefetch
streamer = RequestStreamer(
request_handler=_request_handler,
result_handler=_result_handler,
iterate_sync_in_thread=False,
logger=self.logger,
**streamer_args,
)
async for response in streamer.stream(
request_iterator=self.req_iter, results_in_order=self.results_in_order
):
if self.show_progress:
self.p_bar.update()
yield response
|
import asyncio
from typing import TYPE_CHECKING, Optional, Tuple
import grpc
from jina.clients.base.retry import wait_or_raise_err
from jina.clients.helper import callback_exec
from jina.excepts import InternalNetworkError
from jina.proto import jina_pb2_grpc
from jina.serve.stream import RequestStreamer
if TYPE_CHECKING:
from jina.types.request import Request
class UnaryRpc:
"""Class that encapsulated the methods required to run unary rpc calls from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
continue_on_error,
metadata,
on_always,
on_done,
on_error,
p_bar,
req_iter,
max_attempts,
backoff_multiplier,
initial_backoff,
max_backoff,
logger,
show_progress,
compression,
client_args,
prefetch,
results_in_order,
**kwargs
):
self.results_in_order = results_in_order
self.prefetch = prefetch
self.client_args = client_args
self.compression = compression
self.show_progress = show_progress
self.logger = logger
self.max_backoff = max_backoff
self.initial_backoff = initial_backoff
self.backoff_multiplier = backoff_multiplier
self.max_attempts = max_attempts
self.req_iter = req_iter
self.p_bar = p_bar
self.on_error = on_error
self.on_done = on_done
self.on_always = on_always
self.metadata = metadata
self.continue_on_error = continue_on_error
self.channel = channel
self.kwargs = kwargs
async def unary_rpc_with_retry(self):
"""Wraps the unary rpc call with retry loop based on the retry params.
:yields: Responses received from the target.
"""
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(self.channel)
def _request_handler(
request: 'Request',
) -> 'Tuple[asyncio.Future, Optional[asyncio.Future]]':
async def _with_retry(req: 'Request'):
for attempt in range(1, self.max_attempts + 1):
try:
return await stub.process_single_data(
req,
compression=self.compression,
metadata=self.metadata,
credentials=self.kwargs.get('credentials', None),
timeout=self.kwargs.get('timeout', None),
)
except (
grpc.aio.AioRpcError,
InternalNetworkError,
) as err:
await wait_or_raise_err(
attempt=attempt,
err=err,
max_attempts=self.max_attempts,
backoff_multiplier=self.backoff_multiplier,
initial_backoff=self.initial_backoff,
max_backoff=self.max_backoff,
)
return (
asyncio.ensure_future(_with_retry(request)),
None,
)
def _result_handler(resp):
callback_exec(
response=resp,
logger=self.logger,
on_error=self.on_error,
on_done=self.on_done,
on_always=self.on_always,
continue_on_error=self.continue_on_error,
)
return resp
streamer_args = vars(self.client_args)
if self.prefetch:
streamer_args['prefetch'] = self.prefetch
streamer = RequestStreamer(
request_handler=_request_handler,
result_handler=_result_handler,
iterate_sync_in_thread=False,
logger=self.logger,
**streamer_args,
)
async for response in streamer.stream(
request_iterator=self.req_iter, results_in_order=self.results_in_order
):
if self.show_progress:
self.p_bar.update()
yield response
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
|
from docarray.array.document import DocumentArray
from docarray.array.storage.weaviate import StorageMixins, WeaviateConfig
__all__ = ['DocumentArrayWeaviate', 'WeaviateConfig']
class DocumentArrayWeaviate(StorageMixins, DocumentArray):
"""
DocumentArray that stores Documents in a `Weaviate <https://weaviate.io/>`_ vector search engine.
.. note::
This DocumentArray requires `weaviate-client`. You can install it via `pip install "docarray[weaviate]"`.
To use Weaviate as storage backend, a Weaviate service needs to be running on your machine.
With this implementation, :meth:`match` and :meth:`find` perform fast (approximate) vector search.
Additionally, search with filters is supported.
Example usage:
.. code-block:: python
from docarray import DocumentArray
# connect to running Weaviate service with default configuration (address: http://localhost:8080)
da = DocumentArray(storage='weaviate')
# connect to a previously persisted DocumentArrayWeaviate by specifying name, host, and port
da = DocumentArray(
storage='weaviate', config={'name': 'Persisted', 'host': 'localhost', 'port': 1234}
)
.. seealso::
For further details, see our :ref:`user guide <weaviate>`.
"""
def __new__(cls, *args, **kwargs):
"""``__new__`` method for :class:`DocumentArrayWeaviate`
:param *args: list of args to instantiate the object
:param **kwargs: dict of args to instantiate the object
:return: the instantiated :class:`DocumentArrayWeaviate` object
"""
return super().__new__(cls)
|
from .document import DocumentArray
from .storage.weaviate import StorageMixins, WeaviateConfig
__all__ = ['DocumentArrayWeaviate', 'WeaviateConfig']
class DocumentArrayWeaviate(StorageMixins, DocumentArray):
"""
DocumentArray that stores Documents in a `Weaviate <https://weaviate.io/>`_ vector search engine.
.. note::
This DocumentArray requires `weaviate-client`. You can install it via `pip install "docarray[weaviate]"`.
To use Weaviate as storage backend, a Weaviate service needs to be running on your machine.
With this implementation, :meth:`match` and :meth:`find` perform fast (approximate) vector search.
Additionally, search with filters is supported.
Example usage:
.. code-block:: python
from docarray import DocumentArray
# connect to running Weaviate service with default configuration (address: http://localhost:8080)
da = DocumentArray(storage='weaviate')
# connect to a previously persisted DocumentArrayWeaviate by specifying name, host, and port
da = DocumentArray(
storage='weaviate', config={'name': 'Persisted', 'host': 'localhost', 'port': 1234}
)
.. seealso::
For further details, see our :ref:`user guide <weaviate>`.
"""
def __new__(cls, *args, **kwargs):
"""``__new__`` method for :class:`DocumentArrayWeaviate`
:param *args: list of args to instantiate the object
:param **kwargs: dict of args to instantiate the object
:return: the instantiated :class:`DocumentArrayWeaviate` object
"""
return super().__new__(cls)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyEmbedding, NdArrayEmbedding, TorchEmbedding
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import tnp
from docarray.typing.tensor.embedding import TensorFlowEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), TorchEmbedding, torch.Tensor),
(np.zeros((1000, 2)), NdArrayEmbedding, np.ndarray),
],
)
def test_torch_ndarray_to_any_embedding(tensor, cls_audio_tensor, cls_tensor):
class MyEmbeddingDoc(BaseDoc):
tensor: AnyEmbedding
doc = MyEmbeddingDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_tensorflow_to_any_embedding():
class MyEmbeddingDoc(BaseDoc):
tensor: AnyEmbedding
doc = MyEmbeddingDoc(tensor=tf.zeros((1000, 2)))
assert isinstance(doc.tensor, TensorFlowEmbedding)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1000, 2)))
|
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyEmbedding, NdArrayEmbedding, TorchEmbedding
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import tnp
from docarray.typing.tensor.embedding import TensorFlowEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyEmbedding)
def test_dump_json():
tensor = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), TorchEmbedding, torch.Tensor),
(np.zeros((1000, 2)), NdArrayEmbedding, np.ndarray),
],
)
def test_torch_ndarray_to_any_embedding(tensor, cls_audio_tensor, cls_tensor):
class MyEmbeddingDoc(BaseDoc):
tensor: AnyEmbedding
doc = MyEmbeddingDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_tensorflow_to_any_embedding():
class MyEmbeddingDoc(BaseDoc):
tensor: AnyEmbedding
doc = MyEmbeddingDoc(tensor=tf.zeros((1000, 2)))
assert isinstance(doc.tensor, TensorFlowEmbedding)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1000, 2)))
|
import torch
from torchvision.prototype import datapoints
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
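    # Illustrative example: a 32-frame video with num_samples=4 yields frame indices [0, 10, 20, 31].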
t_max = video.shape[temporal_dim] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, temporal_dim, indices)
def uniform_temporal_subsample(
inpt: datapoints.VideoTypeJIT, num_samples: int, temporal_dim: int = -4
) -> datapoints.VideoTypeJIT:
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Video)):
return uniform_temporal_subsample_video(inpt, num_samples, temporal_dim=temporal_dim)
elif isinstance(inpt, datapoints.Video):
if temporal_dim != -4 and inpt.ndim - 4 != temporal_dim:
raise ValueError("Video inputs must have temporal_dim equivalent to -4")
output = uniform_temporal_subsample_video(
inpt.as_subclass(torch.Tensor), num_samples, temporal_dim=temporal_dim
)
return datapoints.Video.wrap_like(inpt, output)
else:
raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
|
import torch
from torchvision.prototype import features
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[temporal_dim] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, temporal_dim, indices)
def uniform_temporal_subsample(
inpt: features.VideoTypeJIT, num_samples: int, temporal_dim: int = -4
) -> features.VideoTypeJIT:
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, features.Video)):
return uniform_temporal_subsample_video(inpt, num_samples, temporal_dim=temporal_dim)
elif isinstance(inpt, features.Video):
if temporal_dim != -4 and inpt.ndim - 4 != temporal_dim:
raise ValueError("Video inputs must have temporal_dim equivalent to -4")
output = uniform_temporal_subsample_video(
inpt.as_subclass(torch.Tensor), num_samples, temporal_dim=temporal_dim
)
return features.Video.wrap_like(inpt, output)
else:
raise TypeError(
f"Input can either be a plain tensor or a `Video` tensor subclass, but got {type(inpt)} instead."
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.tavily_search.tool import (
TavilyAnswer,
TavilyInput,
TavilySearchResults,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TavilyInput": "langchain_community.tools.tavily_search.tool",
"TavilySearchResults": "langchain_community.tools.tavily_search.tool",
"TavilyAnswer": "langchain_community.tools.tavily_search.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
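# Module-level `__getattr__` (PEP 562) resolves these names lazily on first access;
# `create_importer` re-exports them from `langchain_community` and, presumably, emits
# a deprecation warning for the old import path.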
__all__ = [
"TavilyAnswer",
"TavilyInput",
"TavilySearchResults",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.tavily_search.tool import (
TavilyAnswer,
TavilyInput,
TavilySearchResults,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TavilyInput": "langchain_community.tools.tavily_search.tool",
"TavilySearchResults": "langchain_community.tools.tavily_search.tool",
"TavilyAnswer": "langchain_community.tools.tavily_search.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TavilyInput",
"TavilySearchResults",
"TavilyAnswer",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDINO(TestCase):
def setUp(self):
register_all_modules()
def test_dino_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
data_sample = DetDataSample()
data_sample.set_metainfo(metainfo)
configs = [get_detector_cfg('dino/dino-4scale_r50_8xb2-12e_coco.py')]
for config in configs:
model = MODELS.build(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
data_sample.gt_instances = gt_instances
batch_data_samples_1 = [data_sample]
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples_1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
_loss = loss.item()
if 'bbox' in key or 'iou' in key or 'dn' in key:
self.assertEqual(
_loss, 0, f'there should be no {key}({_loss}) '
                        f'when there are no ground truth boxes')
elif 'cls' in key:
self.assertGreater(_loss, 0,
f'{key}({_loss}) should be non-zero')
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
data_sample.gt_instances = gt_instances
batch_data_samples_2 = [data_sample]
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples_2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(
random_image, batch_data_samples=batch_data_samples_2)
# test only predict
model.predict(
random_image,
batch_data_samples=batch_data_samples_2,
rescale=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDINO(TestCase):
def setUp(self):
register_all_modules()
def test_dino_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
data_sample = DetDataSample()
data_sample.set_metainfo(metainfo)
configs = [get_detector_cfg('dino/dino-4scale_r50_8xb2-12e_coco.py')]
for config in configs:
model = build_detector(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
data_sample.gt_instances = gt_instances
batch_data_samples_1 = [data_sample]
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples_1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
_loss = loss.item()
if 'bbox' in key or 'iou' in key or 'dn' in key:
self.assertEqual(
_loss, 0, f'there should be no {key}({_loss}) '
                        f'when there are no ground truth boxes')
elif 'cls' in key:
self.assertGreater(_loss, 0,
f'{key}({_loss}) should be non-zero')
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
data_sample.gt_instances = gt_instances
batch_data_samples_2 = [data_sample]
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples_2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(
random_image, batch_data_samples=batch_data_samples_2)
# test only predict
model.predict(
random_image,
batch_data_samples=batch_data_samples_2,
rescale=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.20.0'
short_version = __version__
def parse_version_info(version_str):
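    # For example: '2.20.0' -> (2, 20, 0); '2.20.0rc1' -> (2, 20, 0, 'rc1').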
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.19.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
_base_ = '../common/lsj-200e_coco-detection.py'
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
# model settings
model = dict(
type='FCOS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=dict(max_norm=35, norm_type=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../common/lsj_200e_coco_detection.py'
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
# model settings
model = dict(
type='FCOS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=dict(max_norm=35, norm_type=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import ADE20KDataset, ADE20KPanopticDataset
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import COCOCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCOCODataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'COCOCaptionDataset', 'RefCOCODataset',
'BaseSegDataset', 'ADE20KDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import ADE20KPanopticDataset
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import COCOCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCOCODataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'COCOCaptionDataset', 'RefCOCODataset'
]
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
        self, arg1: Optional[str] = None, arg2: Optional[str] = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.host = self.runtime_args.host
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
        self, arg1: Optional[str] = None, arg2: Optional[str] = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import MobileNetV2ImageProcessor
if is_torchvision_available():
from transformers import MobileNetV2ImageProcessorFast
class MobileNetV2ImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
):
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class MobileNetV2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = MobileNetV2ImageProcessor if is_vision_available() else None
fast_image_processing_class = MobileNetV2ImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = MobileNetV2ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))
self.assertTrue(hasattr(image_processor, "do_center_crop"))
self.assertTrue(hasattr(image_processor, "crop_size"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
|
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import MobileNetV2ImageProcessor
class MobileNetV2ImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
):
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class MobileNetV2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = MobileNetV2ImageProcessor if is_vision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = MobileNetV2ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))
self.assertTrue(hasattr(image_processor, "do_center_crop"))
self.assertTrue(hasattr(image_processor, "crop_size"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
|
import argparse
import os
from typing import List, Union
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
from jina.parsers import helper
helper._SHOW_ALL_ARGS, old_val = True, helper._SHOW_ALL_ARGS
from jina import __version__
from jina.parsers import get_main_parser
all_d = {
'name': 'Jina',
'description': 'Build multimodal AI services via cloud native technologies',
'license': 'Apache 2.0',
'vendor': 'Jina AI Limited',
'source': 'https://github.com/jina-ai/jina/tree/'
+ os.environ.get('JINA_VCS_VERSION', 'master'),
'url': 'https://jina.ai',
'docs': 'https://docs.jina.ai',
'authors': '[email protected]',
'version': __version__,
'methods': [],
'revision': os.environ.get('JINA_VCS_VERSION'),
}
def get_p(p, parent_d):
parsers = p()._actions[-1].choices
if parsers:
for p_name in parsers.keys():
d = {'name': p_name, 'options': [], 'help': parsers[p_name].description}
for ddd in _export_parser_args(
lambda *x: p()._actions[-1].choices[p_name], type_as_str=True
):
d['options'].append(ddd)
if not d['options']:
d['methods'] = []
get_p(lambda *x: parsers[p_name], d)
parent_d['methods'].append(d)
get_p(get_main_parser, all_d)
if show_all_args:
helper._SHOW_ALL_ARGS = old_val
return all_d
def _export_parser_args(parser_fn, type_as_str: bool = False, **kwargs):
from argparse import _StoreAction, _StoreTrueAction
from jina.enums import BetterEnum
from jina.parsers.helper import _SHOW_ALL_ARGS, CastToIntAction, KVAppendAction
port_attr = ('help', 'choices', 'default', 'required', 'option_strings', 'dest')
parser = parser_fn(**kwargs)
parser2 = parser_fn(**kwargs)
random_dest = set()
for a, b in zip(parser._actions, parser2._actions):
if a.default != b.default:
random_dest.add(a.dest)
for a in parser._actions:
if isinstance(
a, (_StoreAction, _StoreTrueAction, KVAppendAction, CastToIntAction)
):
if not _SHOW_ALL_ARGS and a.help == argparse.SUPPRESS:
continue
ddd = {p: getattr(a, p) for p in port_attr}
if isinstance(a, _StoreTrueAction):
ddd['type'] = bool
elif isinstance(a, KVAppendAction):
ddd['type'] = dict
elif isinstance(a, CastToIntAction):
ddd['type'] = Union[int, List[int]]
else:
ddd['type'] = a.type
if ddd['choices']:
ddd['choices'] = [
str(k) if isinstance(k, BetterEnum) else k for k in ddd['choices']
]
ddd['type'] = str
if isinstance(ddd['default'], BetterEnum):
ddd['default'] = str(ddd['default'])
ddd['type'] = str
if ddd['type'] == str and (a.nargs == '*' or a.nargs == '+'):
ddd['type'] = List[str]
else:
continue
if a.dest in random_dest:
ddd['default_random'] = True
from jina.helper import random_identity, random_port
if isinstance(a.default, str):
ddd['default_factory'] = random_identity.__name__
elif isinstance(a.default, int):
ddd['default_factory'] = random_port.__name__
else:
ddd['default_random'] = False
if type_as_str:
ddd['type'] = getattr(ddd['type'], '__name__', str(ddd['type']))
ddd['name'] = ddd.pop('dest')
yield ddd
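# Illustrative usage (a minimal sketch, not part of the upstream module): dump
# the exported schema as JSON; `default=str` guards against defaults that are
# not JSON-serializable.
if __name__ == '__main__':
    import json
    print(json.dumps(api_to_dict(show_all_args=True), default=str, indent=2))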
|
import argparse
import os
from typing import List
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
from jina.parsers import helper
helper._SHOW_ALL_ARGS, old_val = True, helper._SHOW_ALL_ARGS
from jina import __version__
from jina.parsers import get_main_parser
all_d = {
'name': 'Jina',
'description': 'Build multimodal AI services via cloud native technologies',
'license': 'Apache 2.0',
'vendor': 'Jina AI Limited',
'source': 'https://github.com/jina-ai/jina/tree/'
+ os.environ.get('JINA_VCS_VERSION', 'master'),
'url': 'https://jina.ai',
'docs': 'https://docs.jina.ai',
'authors': '[email protected]',
'version': __version__,
'methods': [],
'revision': os.environ.get('JINA_VCS_VERSION'),
}
def get_p(p, parent_d):
parsers = p()._actions[-1].choices
if parsers:
for p_name in parsers.keys():
d = {'name': p_name, 'options': [], 'help': parsers[p_name].description}
for ddd in _export_parser_args(
lambda *x: p()._actions[-1].choices[p_name], type_as_str=True
):
d['options'].append(ddd)
if not d['options']:
d['methods'] = []
get_p(lambda *x: parsers[p_name], d)
parent_d['methods'].append(d)
get_p(get_main_parser, all_d)
if show_all_args:
helper._SHOW_ALL_ARGS = old_val
return all_d
def _export_parser_args(parser_fn, type_as_str: bool = False, **kwargs):
from argparse import _StoreAction, _StoreTrueAction
from jina.enums import BetterEnum
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction
port_attr = ('help', 'choices', 'default', 'required', 'option_strings', 'dest')
parser = parser_fn(**kwargs)
parser2 = parser_fn(**kwargs)
random_dest = set()
for a, b in zip(parser._actions, parser2._actions):
if a.default != b.default:
random_dest.add(a.dest)
for a in parser._actions:
if isinstance(a, (_StoreAction, _StoreTrueAction, KVAppendAction)):
if not _SHOW_ALL_ARGS and a.help == argparse.SUPPRESS:
continue
ddd = {p: getattr(a, p) for p in port_attr}
if isinstance(a, _StoreTrueAction):
ddd['type'] = bool
elif isinstance(a, KVAppendAction):
ddd['type'] = dict
else:
ddd['type'] = a.type
if ddd['choices']:
ddd['choices'] = [
str(k) if isinstance(k, BetterEnum) else k for k in ddd['choices']
]
ddd['type'] = str
if isinstance(ddd['default'], BetterEnum):
ddd['default'] = str(ddd['default'])
ddd['type'] = str
if ddd['type'] == str and (a.nargs == '*' or a.nargs == '+'):
ddd['type'] = List[str]
else:
continue
if a.dest in random_dest:
ddd['default_random'] = True
from jina.helper import random_identity, random_port
if isinstance(a.default, str):
ddd['default_factory'] = random_identity.__name__
elif isinstance(a.default, int):
ddd['default_factory'] = random_port.__name__
else:
ddd['default_random'] = False
if type_as_str:
ddd['type'] = getattr(ddd['type'], '__name__', str(ddd['type']))
ddd['name'] = ddd.pop('dest')
yield ddd
|
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
# training schedule
max_epochs = 12
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.0001, by_epoch=False, begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
|
_base_ = ['grid_rcnn_r50_fpn_gn-head_2x_coco.py']
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
"""Smart PDF Loader."""
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SmartPDFLoader(BaseReader):
"""
SmartPDFLoader uses nested layout information such as sections, paragraphs, lists and tables to smartly chunk PDFs for optimal usage of LLM context window.
Args:
llmsherpa_api_url (str): Address of the service hosting llmsherpa PDF parser
"""
def __init__(
        self, *args: Any, llmsherpa_api_url: Optional[str] = None, **kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
from llmsherpa.readers import LayoutPDFReader
self.pdf_reader = LayoutPDFReader(llmsherpa_api_url)
def load_data(
self, pdf_path_or_url: str, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Load data and extract table from PDF file.
Args:
pdf_path_or_url (str): A url or file path pointing to the PDF
Returns:
List[Document]: List of documents.
"""
results = []
doc = self.pdf_reader.read_pdf(str(pdf_path_or_url))
for chunk in doc.chunks():
document = Document(
text=chunk.to_context_text(),
extra_info={**extra_info, "chunk_type": chunk.tag}
if extra_info
else {"chunk_type": chunk.tag},
)
results.append(document)
return results
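# Illustrative usage (a minimal sketch; the parser URL and PDF location are
# placeholders, not values shipped with this module):
#
#   loader = SmartPDFLoader(
#       llmsherpa_api_url="https://<llmsherpa-host>/api/parseDocument?renderFormat=all"
#   )
#   documents = loader.load_data("https://example.com/sample.pdf")
#   print(len(documents), documents[0].extra_info["chunk_type"])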
|
"""Smart PDF Loader."""
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SmartPDFLoader(BaseReader):
"""SmartPDFLoader uses nested layout information such as sections, paragraphs, lists and tables to smartly chunk PDFs for optimal usage of LLM context window.
Args:
llmsherpa_api_url (str): Address of the service hosting llmsherpa PDF parser
"""
def __init__(
        self, *args: Any, llmsherpa_api_url: Optional[str] = None, **kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
from llmsherpa.readers import LayoutPDFReader
self.pdf_reader = LayoutPDFReader(llmsherpa_api_url)
def load_data(
self, pdf_path_or_url: str, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Load data and extract table from PDF file.
Args:
pdf_path_or_url (str): A url or file path pointing to the PDF
Returns:
List[Document]: List of documents.
"""
results = []
doc = self.pdf_reader.read_pdf(str(pdf_path_or_url))
for chunk in doc.chunks():
document = Document(
text=chunk.to_context_text(),
extra_info={**extra_info, "chunk_type": chunk.tag}
if extra_info
else {"chunk_type": chunk.tag},
)
results.append(document)
return results
|
"""Chat generation output classes."""
from __future__ import annotations
from typing import Literal, Union
from pydantic import model_validator
from typing_extensions import Self
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
class ChatGeneration(Generation):
"""A single chat generation output.
A subclass of Generation that represents the response from a chat model
that generates chat messages.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.
Users working with chat models will usually access information via either
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks).
"""
text: str = ""
"""*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
message: BaseMessage
"""The message output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@model_validator(mode="after")
def set_text(self) -> Self:
"""Set the text attribute to be the contents of the message.
Args:
values: The values of the object.
Returns:
The values of the object with the text attribute set.
Raises:
ValueError: If the message is not a string or a list.
"""
text = ""
if isinstance(self.message.content, str):
text = self.message.content
# Assumes text in content blocks in OpenAI format.
# Uses first text block.
elif isinstance(self.message.content, list):
for block in self.message.content:
if isinstance(block, str):
text = block
break
if isinstance(block, dict) and "text" in block:
text = block["text"]
break
self.text = text
return self
class ChatGenerationChunk(ChatGeneration):
"""ChatGeneration chunk.
ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def __add__(
self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
) -> ChatGenerationChunk:
"""Concatenate two ChatGenerationChunks.
Args:
other: The other ChatGenerationChunk or list of ChatGenerationChunks to
concatenate.
"""
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
if isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
def merge_chat_generation_chunks(
chunks: list[ChatGenerationChunk],
) -> Union[ChatGenerationChunk, None]:
"""Merge a list of ChatGenerationChunks into a single ChatGenerationChunk."""
if not chunks:
return None
if len(chunks) == 1:
return chunks[0]
return chunks[0] + chunks[1:]
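# Illustrative usage (a minimal sketch; AIMessageChunk comes from
# langchain_core.messages and is not imported by this module):
#
#   from langchain_core.messages import AIMessageChunk
#   chunks = [
#       ChatGenerationChunk(message=AIMessageChunk(content="Hello")),
#       ChatGenerationChunk(message=AIMessageChunk(content=" world")),
#   ]
#   merged = merge_chat_generation_chunks(chunks)
#   assert merged is not None and merged.text == "Hello world"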
|
"""Chat generation output classes."""
from __future__ import annotations
from typing import Literal, Union
from pydantic import model_validator
from typing_extensions import Self
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
class ChatGeneration(Generation):
"""A single chat generation output.
A subclass of Generation that represents the response from a chat model
that generates chat messages.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.
Users working with chat models will usually access information via either
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks).
"""
text: str = ""
"""*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
message: BaseMessage
"""The message output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@model_validator(mode="after")
def set_text(self) -> Self:
"""Set the text attribute to be the contents of the message.
Args:
values: The values of the object.
Returns:
The values of the object with the text attribute set.
Raises:
ValueError: If the message is not a string or a list.
"""
text = ""
if isinstance(self.message.content, str):
text = self.message.content
# Assumes text in content blocks in OpenAI format.
# Uses first text block.
elif isinstance(self.message.content, list):
for block in self.message.content:
if isinstance(block, str):
text = block
break
if isinstance(block, dict) and "text" in block:
text = block["text"]
break
else:
pass
self.text = text
return self
class ChatGenerationChunk(ChatGeneration):
"""ChatGeneration chunk.
ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def __add__(
self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
) -> ChatGenerationChunk:
"""Concatenate two ChatGenerationChunks.
Args:
other: The other ChatGenerationChunk or list of ChatGenerationChunks to
concatenate.
"""
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
if isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
def merge_chat_generation_chunks(
chunks: list[ChatGenerationChunk],
) -> Union[ChatGenerationChunk, None]:
"""Merge a list of ChatGenerationChunks into a single ChatGenerationChunk."""
if not chunks:
return None
if len(chunks) == 1:
return chunks[0]
return chunks[0] + chunks[1:]
|
import logging
from datetime import datetime as dt
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
from langchain_community.tools.slack.utils import UTC_FORMAT
logger = logging.getLogger(__name__)
class ScheduleMessageSchema(BaseModel):
"""Input for ScheduleMessageTool."""
message: str = Field(
...,
description="The message to be sent.",
)
channel: str = Field(
...,
description="The channel, private group, or IM channel to send message to.",
)
timestamp: str = Field(
...,
description="The datetime for when the message should be sent in the "
' following format: YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date '
" and time components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
class SlackScheduleMessage(SlackBaseTool):
"""Tool for scheduling a message in Slack."""
name: str = "schedule_message"
description: str = (
"Use this tool to schedule a message to be sent on a specific date and time."
)
args_schema: Type[ScheduleMessageSchema] = ScheduleMessageSchema
def _run(
self,
message: str,
channel: str,
timestamp: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
unix_timestamp = dt.timestamp(dt.strptime(timestamp, UTC_FORMAT))
result = self.client.chat_scheduleMessage(
channel=channel, text=message, post_at=unix_timestamp
)
output = "Message scheduled: " + str(result)
return output
except Exception as e:
return "Error scheduling message: {}".format(e)
|
import logging
from datetime import datetime as dt
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
from langchain_community.tools.slack.utils import UTC_FORMAT
logger = logging.getLogger(__name__)
class ScheduleMessageSchema(BaseModel):
"""Input for ScheduleMessageTool."""
message: str = Field(
...,
description="The message to be sent.",
)
channel: str = Field(
...,
description="The channel, private group, or IM channel to send message to.",
)
timestamp: str = Field(
...,
description="The datetime for when the message should be sent in the "
' following format: YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date '
" and time components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
class SlackScheduleMessage(SlackBaseTool): # type: ignore[override, override]
"""Tool for scheduling a message in Slack."""
name: str = "schedule_message"
description: str = (
"Use this tool to schedule a message to be sent on a specific date and time."
)
args_schema: Type[ScheduleMessageSchema] = ScheduleMessageSchema
def _run(
self,
message: str,
channel: str,
timestamp: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
unix_timestamp = dt.timestamp(dt.strptime(timestamp, UTC_FORMAT))
result = self.client.chat_scheduleMessage(
channel=channel, text=message, post_at=unix_timestamp
)
output = "Message scheduled: " + str(result)
return output
except Exception as e:
return "Error scheduling message: {}".format(e)
|
import json
from pathlib import Path
from typing import List, Optional
from langchain_core.chat_history import (
BaseChatMessageHistory,
)
from langchain_core.messages import BaseMessage, messages_from_dict, messages_to_dict
class FileChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in a local file."""
def __init__(
self,
file_path: str,
*,
encoding: Optional[str] = None,
ensure_ascii: bool = True,
) -> None:
"""Initialize the file path for the chat history.
Args:
file_path: The path to the local file to store the chat history.
encoding: The encoding to use for file operations. Defaults to None.
ensure_ascii: If True, escape non-ASCII in JSON. Defaults to True.
"""
self.file_path = Path(file_path)
self.encoding = encoding
self.ensure_ascii = ensure_ascii
if not self.file_path.exists():
self.file_path.touch()
self.file_path.write_text(
json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from the local file"""
items = json.loads(self.file_path.read_text(encoding=self.encoding))
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in the local file"""
messages = messages_to_dict(self.messages)
messages.append(messages_to_dict([message])[0])
self.file_path.write_text(
json.dumps(messages, ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
def clear(self) -> None:
"""Clear session memory from the local file"""
self.file_path.write_text(
json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
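# Illustrative usage (a minimal sketch; the file name is arbitrary):
#
#   from langchain_core.messages import AIMessage, HumanMessage
#   history = FileChatMessageHistory("chat_history.json")
#   history.add_message(HumanMessage(content="hi!"))
#   history.add_message(AIMessage(content="hello, how can I help?"))
#   print(history.messages)
#   history.clear()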
|
import json
from pathlib import Path
from typing import List, Optional
from langchain_core.chat_history import (
BaseChatMessageHistory,
)
from langchain_core.messages import BaseMessage, messages_from_dict, messages_to_dict
class FileChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in a local file."""
def __init__(
self,
file_path: str,
*,
encoding: Optional[str] = None,
ensure_ascii: bool = True,
) -> None:
"""Initialize the file path for the chat history.
Args:
file_path: The path to the local file to store the chat history.
encoding: The encoding to use for file operations. Defaults to None.
ensure_ascii: If True, escape non-ASCII in JSON. Defaults to True.
"""
self.file_path = Path(file_path)
self.encoding = encoding
self.ensure_ascii = ensure_ascii
if not self.file_path.exists():
self.file_path.touch()
self.file_path.write_text(
json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from the local file"""
items = json.loads(self.file_path.read_text(encoding=self.encoding))
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in the local file"""
messages = messages_to_dict(self.messages)
messages.append(messages_to_dict([message])[0])
self.file_path.write_text(
json.dumps(messages, ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
def clear(self) -> None:
"""Clear session memory from the local file"""
self.file_path.write_text(
json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
)
|
__version__ = '0.30.0a3'
from docarray.array.array.array import DocumentArray
from docarray.base_document.document import BaseDocument
__all__ = [
'BaseDocument',
'DocumentArray',
]
|
__version__ = '0.30.a3'
from docarray.array.array.array import DocumentArray
from docarray.base_document.document import BaseDocument
__all__ = [
'BaseDocument',
'DocumentArray',
]
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc, DocList
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.typing import NdArray
class SchemaDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10]
@pytest.fixture
def docs():
docs = DocList[SchemaDoc](
[
SchemaDoc(text=f'hello {i}', price=i, tensor=np.array([i] * 10))
for i in range(9)
]
)
docs.append(SchemaDoc(text='good bye', price=100, tensor=np.array([100.0] * 10)))
return docs
def test_indexing(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
assert doc_index.num_docs() == 0
doc_index.index(docs)
assert doc_index.num_docs() == 10
@pytest.fixture
def doc_index(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
doc_index.index(docs)
return doc_index
def test_del_item(docs, doc_index):
to_remove = [docs[0].id, docs[1].id]
doc_index._del_items(to_remove)
assert doc_index.num_docs() == 8
def test_del(docs, doc_index):
del doc_index[docs[0].id]
assert doc_index.num_docs() == 9
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = MyDoc(text='query', price=0, tensor=np.ones(10))
else:
query = np.ones(10)
docs, scores = doc_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find_batched(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = DocList[MyDoc](
[
MyDoc(text='query 0', price=0, tensor=np.zeros(10)),
MyDoc(text='query 1', price=1, tensor=np.ones(10)),
]
)
else:
query = np.ones((2, 10))
docs, scores = doc_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 2
for result in docs:
assert len(result) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
def test_concatenated_queries(doc_index):
query = SchemaDoc(text='query', price=0, tensor=np.ones(10))
q = (
doc_index.build_query()
.find(query=query, search_field='tensor', limit=5)
.filter(filter_query={'price': {'$neq': 5}})
.build()
)
docs, scores = doc_index.execute_query(q)
assert len(docs) == 4
def test_save_and_load(doc_index, tmpdir):
initial_num_docs = doc_index.num_docs()
binary_file = str(tmpdir / 'docs.bin')
doc_index.persist(binary_file)
new_doc_index = InMemoryExactNNIndex[SchemaDoc](index_file_path=binary_file)
docs, scores = new_doc_index.find(np.ones(10), search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert new_doc_index.num_docs() == initial_num_docs
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc, DocList
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.typing import NdArray
class SchemaDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10]
@pytest.fixture
def docs():
docs = DocList[SchemaDoc](
[
SchemaDoc(text=f'hello {i}', price=i, tensor=np.array([i] * 10))
for i in range(9)
]
)
docs.append(SchemaDoc(text='good bye', price=100, tensor=np.array([100.0] * 10)))
return docs
def test_indexing(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
assert doc_index.num_docs() == 0
doc_index.index(docs)
assert doc_index.num_docs() == 10
@pytest.fixture
def doc_index(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
doc_index.index(docs)
return doc_index
def test_del_item(docs, doc_index):
to_remove = [docs[0].id, docs[1].id]
doc_index._del_items(to_remove)
assert doc_index.num_docs() == 8
def test_del(docs, doc_index):
del doc_index[docs[0].id]
assert doc_index.num_docs() == 9
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = MyDoc(text='query', price=0, tensor=np.ones(10))
else:
query = np.ones(10)
docs, scores = doc_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find_batched(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = DocList[MyDoc](
[
MyDoc(text='query 0', price=0, tensor=np.zeros(10)),
MyDoc(text='query 1', price=1, tensor=np.ones(10)),
]
)
else:
query = np.ones((2, 10))
docs, scores = doc_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 2
for result in docs:
assert len(result) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
def test_concatenated_queries(doc_index):
query = SchemaDoc(text='query', price=0, tensor=np.ones(10))
q = (
doc_index.build_query()
.find(query=query, search_field='tensor', limit=5)
.filter(filter_query={'price': {'$neq': 5}})
.build()
)
docs, scores = doc_index.execute_query(q)
assert len(docs) == 4
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
lang_model_name = 'bert-base-uncased'
model = dict(
type='GroundingDINO',
num_queries=900,
with_box_refine=True,
as_two_stage=True,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=False,
),
language_model=dict(
type='BertModel',
name=lang_model_name,
pad_to_max=False,
use_sub_sentence_represent=True,
special_tokens_list=['[CLS]', '[SEP]', '.', '?'],
add_pooling_layer=True,
),
backbone=dict(
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False,
convert_weights=False),
neck=dict(
type='ChannelMapper',
in_channels=[192, 384, 768],
kernel_size=1,
out_channels=256,
act_cfg=None,
bias=True,
norm_cfg=dict(type='GN', num_groups=32),
num_outs=4),
encoder=dict(
num_layers=6,
# visual layer config
layer_cfg=dict(
self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
# text layer config
text_layer_cfg=dict(
self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)),
# fusion layer config
fusion_layer_cfg=dict(
v_dim=256,
l_dim=256,
embed_dim=1024,
num_heads=4,
init_values=1e-4),
),
decoder=dict(
num_layers=6,
return_intermediate=True,
layer_cfg=dict(
# query self attention layer
self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to text
cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to image
cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
post_norm_cfg=None),
positional_encoding=dict(
num_feats=128, normalize=True, offset=0.0, temperature=20),
bbox_head=dict(
type='GroundingDINOHead',
num_classes=80,
sync_cls_avg_factor=True,
contrastive_cfg=dict(max_text_len=256),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0), # 2.0 in DeformDETR
loss_bbox=dict(type='L1Loss', loss_weight=5.0)),
dn_cfg=dict( # TODO: Move to model.train_cfg ?
label_noise_scale=0.5,
box_noise_scale=1.0, # 0.4 for DN-DETR
group_cfg=dict(dynamic=True, num_groups=None,
num_dn_queries=100)), # TODO: half num_dn_queries
# training and testing settings
train_cfg=None,
test_cfg=dict(max_per_img=300))
test_pipeline = [
dict(
type='LoadImageFromFile', backend_args=None,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities'))
]
val_dataloader = dict(
dataset=dict(pipeline=test_pipeline, return_classes=True))
test_dataloader = val_dataloader
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
lang_model_name = 'bert-base-uncased'
model = dict(
type='GroundingDINO',
num_queries=900,
with_box_refine=True,
as_two_stage=True,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=False,
),
language_model=dict(
type='BertModel',
name=lang_model_name,
pad_to_max=False,
use_sub_sentence_represent=True,
special_tokens_list=['[CLS]', '[SEP]', '.', '?'],
add_pooling_layer=True,
),
backbone=dict(
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False,
convert_weights=False),
neck=dict(
type='ChannelMapper',
in_channels=[192, 384, 768],
kernel_size=1,
out_channels=256,
act_cfg=None,
bias=True,
norm_cfg=dict(type='GN', num_groups=32),
num_outs=4),
encoder=dict(
num_layers=6,
# visual layer config
layer_cfg=dict(
self_attn_cfg=dict(embed_dims=256, num_levels=4, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
# text layer config
text_layer_cfg=dict(
self_attn_cfg=dict(num_heads=4, embed_dims=256, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=1024, ffn_drop=0.0)),
# fusion layer config
fusion_layer_cfg=dict(
v_dim=256,
l_dim=256,
embed_dim=1024,
num_heads=4,
init_values=1e-4),
),
decoder=dict(
num_layers=6,
return_intermediate=True,
layer_cfg=dict(
# query self attention layer
self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to text
cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
# cross attention layer query to image
cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0),
ffn_cfg=dict(
embed_dims=256, feedforward_channels=2048, ffn_drop=0.0)),
post_norm_cfg=None),
positional_encoding=dict(
num_feats=128, normalize=True, offset=0.0, temperature=20),
bbox_head=dict(
type='GroundingDINOHead',
num_classes=80,
sync_cls_avg_factor=True,
max_text_len=256,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0), # 2.0 in DeformDETR
loss_bbox=dict(type='L1Loss', loss_weight=5.0)),
dn_cfg=dict( # TODO: Move to model.train_cfg ?
label_noise_scale=0.5,
box_noise_scale=1.0, # 0.4 for DN-DETR
group_cfg=dict(dynamic=True, num_groups=None,
num_dn_queries=100)), # TODO: half num_dn_queries
# training and testing settings
train_cfg=None,
test_cfg=dict(max_per_img=300))
test_pipeline = [
dict(
type='LoadImageFromFile', backend_args=None,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities'))
]
val_dataloader = dict(
dataset=dict(pipeline=test_pipeline, return_classes=True))
test_dataloader = val_dataloader
|
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_URL = "https://zenodo.org/record/3338373/files/musdb18hq.zip"
_CHECKSUM = "baac80d0483c61d74b2e5f3be75fa557eec52898339e6aa45c1fa48833c5d21d"
_EXT = ".wav"
_SAMPLE_RATE = 44100
_VALIDATION_SET = [
"Actions - One Minute Smile",
"Clara Berry And Wooldog - Waltz For My Victims",
"Johnny Lokke - Promises & Lies",
"Patrick Talbot - A Reason To Leave",
"Triviul - Angelsaint",
"Alexander Ross - Goodbye Bolero",
"Fergessen - Nos Palpitants",
"Leaf - Summerghost",
"Skelpolu - Human Mistakes",
"Young Griffo - Pennies",
"ANiMAL - Rockshow",
"James May - On The Line",
"Meaxic - Take A Step",
"Traffic Experiment - Sirens",
]
class MUSDB_HQ(Dataset):
"""Create *MUSDB_HQ* [:footcite:`MUSDB18HQ`] Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"train"``, ``"test"``].
        sources (List[str] or None, optional): Sources to extract data from.
List can contain the following options: [``"bass"``, ``"drums"``, ``"other"``, ``"mixture"``, ``"vocals"``].
If ``None``, dataset consists of tracks except mixture.
(default: ``None``)
split (str or None, optional): Whether to split training set into train and validation set.
If ``None``, no splitting occurs. If ``train`` or ``validation``, returns respective set.
(default: ``None``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
sources: Optional[List[str]] = None,
split: Optional[str] = None,
download: bool = False,
) -> None:
self.sources = ["bass", "drums", "other", "vocals"] if not sources else sources
self.split = split
basename = os.path.basename(_URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
assert subset in ["test", "train"], "`subset` must be one of ['test', 'train']"
assert self.split is None or self.split in [
"train",
"validation",
], "`split` must be one of ['train', 'validation']"
base_path = os.path.join(root, basename)
self._path = os.path.join(base_path, subset)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
os.makedirs(base_path, exist_ok=True)
extract_archive(archive, base_path)
self.names = self._collect_songs()
def _get_track(self, name, source):
return Path(self._path) / name / f"{source}{_EXT}"
def _load_sample(self, n: int) -> Tuple[torch.Tensor, int, int, str]:
name = self.names[n]
wavs = []
num_frames = None
for source in self.sources:
track = self._get_track(name, source)
wav, sr = torchaudio.load(str(track))
assert sr == _SAMPLE_RATE, f"expected sample rate {_SAMPLE_RATE}, but got {sr}"
if num_frames is None:
num_frames = wav.shape[-1]
else:
assert wav.shape[-1] == num_frames, "num_frames do not match across sources"
wavs.append(wav)
stacked = torch.stack(wavs)
return stacked, _SAMPLE_RATE, num_frames, name
def _collect_songs(self):
if self.split == "validation":
return _VALIDATION_SET
path = Path(self._path)
names = []
for root, folders, _ in os.walk(path, followlinks=True):
root = Path(root)
if root.name.startswith(".") or folders or root == path:
continue
name = str(root.relative_to(path))
if self.split and name in _VALIDATION_SET:
continue
names.append(name)
return sorted(names)
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, int, str): ``(waveforms, sample_rate, num_frames, track_name)``
"""
return self._load_sample(n)
def __len__(self) -> int:
return len(self.names)
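# Illustrative usage (a minimal sketch; the root path is a placeholder):
#
#   dataset = MUSDB_HQ(root="/data", subset="train", split="train", download=True)
#   waveforms, sample_rate, num_frames, name = dataset[0]
#   # `waveforms` stacks one (channels, frames) tensor per requested source.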
|
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_URL = "https://zenodo.org/record/3338373/files/musdb18hq.zip"
_CHECKSUM = "baac80d0483c61d74b2e5f3be75fa557eec52898339e6aa45c1fa48833c5d21d"
_EXT = ".wav"
_SAMPLE_RATE = 44100
_VALIDATION_SET = [
"Actions - One Minute Smile",
"Clara Berry And Wooldog - Waltz For My Victims",
"Johnny Lokke - Promises & Lies",
"Patrick Talbot - A Reason To Leave",
"Triviul - Angelsaint",
"Alexander Ross - Goodbye Bolero",
"Fergessen - Nos Palpitants",
"Leaf - Summerghost",
"Skelpolu - Human Mistakes",
"Young Griffo - Pennies",
"ANiMAL - Rockshow",
"James May - On The Line",
"Meaxic - Take A Step",
"Traffic Experiment - Sirens",
]
class MUSDB_HQ(Dataset):
"""Create *MUSDB_HQ* [:footcite:`MUSDB18HQ`] Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"train"``, ``"test"``].
        sources (List[str] or None, optional): Sources to extract data from.
List can contain the following options: [``"bass"``, ``"drums"``, ``"other"``, ``"mixture"``, ``"vocals"``].
If ``None``, dataset consists of tracks except mixture.
(default: ``None``)
split (str or None, optional): Whether to split training set into train and validation set.
If ``None``, no splitting occurs. If ``train`` or ``validation``, returns respective set.
(default: ``None``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
sources: Optional[List[str]] = None,
split: Optional[str] = None,
download: bool = False,
) -> None:
self.sources = ["bass", "drums", "other", "vocals"] if not sources else sources
self.split = split
basename = os.path.basename(_URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
assert subset in ["test", "train"], "`subset` must be one of ['test', 'train']"
assert self.split is None or self.split in [
"train",
"validation",
], "`split` must be one of ['train', 'validation']"
base_path = os.path.join(root, basename)
self._path = os.path.join(base_path, subset)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
os.makedirs(base_path, exist_ok=True)
extract_archive(archive, base_path)
self.names = self._collect_songs()
def _get_track(self, name, source):
return Path(self._path) / name / f"{source}{_EXT}"
def _load_sample(self, n: int) -> Tuple[torch.Tensor, int, int, str]:
name = self.names[n]
wavs = []
num_frames = None
for source in self.sources:
track = self._get_track(name, source)
wav, sr = torchaudio.load(str(track))
assert sr == _SAMPLE_RATE, f"expected sample rate {_SAMPLE_RATE}, but got {sr}"
if num_frames is None:
num_frames = wav.shape[-1]
else:
assert wav.shape[-1] == num_frames, "num_frames do not match across sources"
wavs.append(wav)
stacked = torch.stack(wavs)
return stacked, _SAMPLE_RATE, num_frames, name
def _collect_songs(self):
if self.split == "validation":
return _VALIDATION_SET
path = Path(self._path)
names = []
for root, folders, _ in os.walk(path, followlinks=True):
root = Path(root)
if root.name.startswith(".") or folders or root == path:
continue
name = str(root.relative_to(path))
if self.split and name in _VALIDATION_SET:
continue
names.append(name)
return sorted(names)
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, int, str): ``(waveforms, sample_rate, num_frames, track_name)``
"""
return self._load_sample(n)
def __len__(self) -> int:
return len(self.names)
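# Minimal usage sketch (not part of the torchaudio source). The root path is a
# placeholder and assumes the MUSDB18-HQ archive is already extracted there;
# pass download=True to fetch it instead (the archive is large).
if __name__ == "__main__":
    dataset = MUSDB_HQ(root="./data", subset="train", split="train", download=False)
    waveforms, sample_rate, num_frames, name = dataset[0]
    # `waveforms` stacks one (channels, num_frames) tensor per requested source,
    # so its shape is (len(sources), channels, num_frames).
    print(name, waveforms.shape, sample_rate, num_frames)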
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
        # Get top k indices in the sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil" # "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
        # Get top k indices in the sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
"""
Use scikit-learn regressor interface with GPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
def main(client):
# generate some random data for demonstration
n = 100
m = 1000000
partition_size = 10000
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = dxgb.DaskXGBRegressor(verbosity=1)
# set the device to CUDA
regressor.set_params(tree_method="hist", device="cuda")
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print("Evaluation history:", history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == "__main__":
# With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
    # `LocalCUDACluster` used here is only for demonstration purposes.
with LocalCUDACluster() as cluster:
with Client(cluster) as client:
main(client)
|
"""
Use scikit-learn regressor interface with GPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
import xgboost
def main(client):
# generate some random data for demonstration
n = 100
m = 1000000
partition_size = 10000
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = xgboost.dask.DaskXGBRegressor(verbosity=1)
# set the device to CUDA
regressor.set_params(tree_method="hist", device="cuda")
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print("Evaluation history:", history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == "__main__":
# With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
    # `LocalCUDACluster` used here is only for demonstration purposes.
with LocalCUDACluster() as cluster:
with Client(cluster) as client:
main(client)
|
import functools
import importlib
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, TypeVar
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
@functools.cache
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
# Load all Block instances from the available modules
available_blocks: dict[str, type["Block"]] = {}
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
continue
if not class_name.endswith("Block"):
raise ValueError(
f"Block class {class_name} does not end with 'Block'. "
"If you are creating an abstract class, "
"please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(
f"Block ID {block.name} error: {block.id} is not a valid UUID"
)
if block.id in available_blocks:
raise ValueError(
f"Block ID {block.name} error: {block.id} is already in use"
)
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Ensure all fields in input_schema and output_schema are annotated SchemaFields
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(
f"{block.name} has a boolean field with no default value"
)
available_blocks[block.id] = block_cls
return available_blocks
__all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
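# Minimal sketch of how `all_subclasses` walks the class hierarchy; the toy
# classes below are illustrative only and unrelated to the Block registry.
if __name__ == "__main__":
    class Base:
        pass
    class Child(Base):
        pass
    class GrandChild(Child):
        pass
    print([cls.__name__ for cls in all_subclasses(Base)])  # ['Child', 'GrandChild']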
|
import importlib
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, TypeVar
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
_AVAILABLE_BLOCKS: dict[str, type["Block"]] = {}
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
if _AVAILABLE_BLOCKS:
return _AVAILABLE_BLOCKS
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
continue
if not class_name.endswith("Block"):
raise ValueError(
f"Block class {class_name} does not end with 'Block'. "
"If you are creating an abstract class, "
"please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(
f"Block ID {block.name} error: {block.id} is not a valid UUID"
)
if block.id in _AVAILABLE_BLOCKS:
raise ValueError(
f"Block ID {block.name} error: {block.id} is already in use"
)
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Ensure all fields in input_schema and output_schema are annotated SchemaFields
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(
f"{block.name} has a boolean field with no default value"
)
_AVAILABLE_BLOCKS[block.id] = block_cls
return _AVAILABLE_BLOCKS
__all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
|
__version__ = '0.21.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.21.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.2.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
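# Illustration of the version gate above: `digit_version` turns a version string
# into a comparable tuple, so the range asserts reduce to tuple comparisons.
# The concrete versions below are examples, not requirements.
#   digit_version('2.1.0') >= digit_version('2.0.0rc4')   # True
#   digit_version('2.1.0') <  digit_version('2.2.0')      # True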
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '3.0.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
__version__ = '0.13.12'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.11'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.1',
'strawberry-graphql',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.0',
'strawberry-graphql',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
"""This file only exists to be lazy-imported and avoid V2-related import warnings when just using V1."""
import torch
from torchvision import datapoints
from torchvision.transforms import v2
class PadIfSmaller(v2.Transform):
def __init__(self, size, fill=0):
super().__init__()
self.size = size
self.fill = v2._utils._setup_fill_arg(fill)
def _get_params(self, sample):
_, height, width = v2.utils.query_chw(sample)
padding = [0, 0, max(self.size - width, 0), max(self.size - height, 0)]
needs_padding = any(padding)
return dict(padding=padding, needs_padding=needs_padding)
def _transform(self, inpt, params):
if not params["needs_padding"]:
return inpt
fill = v2._utils._get_fill(self.fill, type(inpt))
fill = v2._utils._convert_fill_arg(fill)
return v2.functional.pad(inpt, padding=params["padding"], fill=fill)
class CocoDetectionToVOCSegmentation(v2.Transform):
"""Turn samples from datasets.CocoDetection into the same format as VOCSegmentation.
This is achieved in two steps:
1. COCO differentiates between 91 categories while VOC only supports 21, including background for both. Fortunately,
the COCO categories are a superset of the VOC ones and thus can be mapped. Instances of the 70 categories not
present in VOC are dropped and replaced by background.
2. COCO only offers detection masks, i.e. a (N, H, W) bool-ish tensor, where the truthy values in each individual
mask denote the instance. However, a segmentation mask is a (H, W) integer tensor (typically torch.uint8), where
the value of each pixel denotes the category it belongs to. The detection masks are merged into one segmentation
mask while pixels that belong to multiple detection masks are marked as invalid.
"""
COCO_TO_VOC_LABEL_MAP = dict(
zip(
[0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72],
range(21),
)
)
INVALID_VALUE = 255
def _coco_detection_masks_to_voc_segmentation_mask(self, target):
if "masks" not in target:
return None
instance_masks, instance_labels_coco = target["masks"], target["labels"]
valid_labels_voc = [
(idx, label_voc)
for idx, label_coco in enumerate(instance_labels_coco.tolist())
if (label_voc := self.COCO_TO_VOC_LABEL_MAP.get(label_coco)) is not None
]
if not valid_labels_voc:
return None
valid_voc_category_idcs, instance_labels_voc = zip(*valid_labels_voc)
instance_masks = instance_masks[list(valid_voc_category_idcs)].to(torch.uint8)
instance_labels_voc = torch.tensor(instance_labels_voc, dtype=torch.uint8)
# Calling `.max()` on the stacked detection masks works fine to separate background from foreground as long as
# there is at most a single instance per pixel. Overlapping instances will be filtered out in the next step.
segmentation_mask, _ = (instance_masks * instance_labels_voc.reshape(-1, 1, 1)).max(dim=0)
segmentation_mask[instance_masks.sum(dim=0) > 1] = self.INVALID_VALUE
return segmentation_mask
def forward(self, image, target):
segmentation_mask = self._coco_detection_masks_to_voc_segmentation_mask(target)
if segmentation_mask is None:
segmentation_mask = torch.zeros(v2.functional.get_spatial_size(image), dtype=torch.uint8)
return image, datapoints.Mask(segmentation_mask)
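# Hedged usage sketch: this transform post-processes samples from
# datasets.CocoDetection once the targets expose "masks" and "labels" (e.g. after
# wrapping the dataset for transforms v2). The paths and the wrapper call are
# assumptions about the surrounding setup, not part of this file.
#   from torchvision import datasets
#   dataset = datasets.CocoDetection("path/to/train2017", "path/to/instances_train2017.json")
#   dataset = datasets.wrap_dataset_for_transforms_v2(dataset, target_keys={"masks", "labels"})
#   image, target = dataset[0]
#   image, voc_mask = CocoDetectionToVOCSegmentation()(image, target)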
|
"""This file only exists to be lazy-imported and avoid V2-related import warnings when just using V1."""
import torch
from torchvision import datapoints
from torchvision.transforms import v2
class PadIfSmaller(v2.Transform):
def __init__(self, size, fill=0):
super().__init__()
self.size = size
self.fill = v2._geometry._setup_fill_arg(fill)
def _get_params(self, sample):
_, height, width = v2.utils.query_chw(sample)
padding = [0, 0, max(self.size - width, 0), max(self.size - height, 0)]
needs_padding = any(padding)
return dict(padding=padding, needs_padding=needs_padding)
def _transform(self, inpt, params):
if not params["needs_padding"]:
return inpt
fill = self.fill[type(inpt)]
fill = v2._utils._convert_fill_arg(fill)
return v2.functional.pad(inpt, padding=params["padding"], fill=fill)
class CocoDetectionToVOCSegmentation(v2.Transform):
"""Turn samples from datasets.CocoDetection into the same format as VOCSegmentation.
This is achieved in two steps:
1. COCO differentiates between 91 categories while VOC only supports 21, including background for both. Fortunately,
the COCO categories are a superset of the VOC ones and thus can be mapped. Instances of the 70 categories not
present in VOC are dropped and replaced by background.
2. COCO only offers detection masks, i.e. a (N, H, W) bool-ish tensor, where the truthy values in each individual
mask denote the instance. However, a segmentation mask is a (H, W) integer tensor (typically torch.uint8), where
the value of each pixel denotes the category it belongs to. The detection masks are merged into one segmentation
mask while pixels that belong to multiple detection masks are marked as invalid.
"""
COCO_TO_VOC_LABEL_MAP = dict(
zip(
[0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72],
range(21),
)
)
INVALID_VALUE = 255
def _coco_detection_masks_to_voc_segmentation_mask(self, target):
if "masks" not in target:
return None
instance_masks, instance_labels_coco = target["masks"], target["labels"]
valid_labels_voc = [
(idx, label_voc)
for idx, label_coco in enumerate(instance_labels_coco.tolist())
if (label_voc := self.COCO_TO_VOC_LABEL_MAP.get(label_coco)) is not None
]
if not valid_labels_voc:
return None
valid_voc_category_idcs, instance_labels_voc = zip(*valid_labels_voc)
instance_masks = instance_masks[list(valid_voc_category_idcs)].to(torch.uint8)
instance_labels_voc = torch.tensor(instance_labels_voc, dtype=torch.uint8)
# Calling `.max()` on the stacked detection masks works fine to separate background from foreground as long as
# there is at most a single instance per pixel. Overlapping instances will be filtered out in the next step.
segmentation_mask, _ = (instance_masks * instance_labels_voc.reshape(-1, 1, 1)).max(dim=0)
segmentation_mask[instance_masks.sum(dim=0) > 1] = self.INVALID_VALUE
return segmentation_mask
def forward(self, image, target):
segmentation_mask = self._coco_detection_masks_to_voc_segmentation_mask(target)
if segmentation_mask is None:
segmentation_mask = torch.zeros(v2.functional.get_spatial_size(image), dtype=torch.uint8)
return image, datapoints.Mask(segmentation_mask)
|
import torch
from torchvision.prototype import datapoints
from torchvision.utils import _log_api_usage_once
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[temporal_dim] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, temporal_dim, indices)
def uniform_temporal_subsample(
inpt: datapoints.VideoTypeJIT, num_samples: int, temporal_dim: int = -4
) -> datapoints.VideoTypeJIT:
if not torch.jit.is_scripting():
_log_api_usage_once(uniform_temporal_subsample)
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Video)):
return uniform_temporal_subsample_video(inpt, num_samples, temporal_dim=temporal_dim)
elif isinstance(inpt, datapoints.Video):
if temporal_dim != -4 and inpt.ndim - 4 != temporal_dim:
raise ValueError("Video inputs must have temporal_dim equivalent to -4")
output = uniform_temporal_subsample_video(
inpt.as_subclass(torch.Tensor), num_samples, temporal_dim=temporal_dim
)
return datapoints.Video.wrap_like(inpt, output)
else:
raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
|
import torch
from torchvision.prototype import datapoints
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[temporal_dim] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, temporal_dim, indices)
def uniform_temporal_subsample(
inpt: datapoints.VideoTypeJIT, num_samples: int, temporal_dim: int = -4
) -> datapoints.VideoTypeJIT:
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, datapoints.Video)):
return uniform_temporal_subsample_video(inpt, num_samples, temporal_dim=temporal_dim)
elif isinstance(inpt, datapoints.Video):
if temporal_dim != -4 and inpt.ndim - 4 != temporal_dim:
raise ValueError("Video inputs must have temporal_dim equivalent to -4")
output = uniform_temporal_subsample_video(
inpt.as_subclass(torch.Tensor), num_samples, temporal_dim=temporal_dim
)
return datapoints.Video.wrap_like(inpt, output)
else:
raise TypeError(f"Input can either be a plain tensor or a `Video` datapoint, but got {type(inpt)} instead.")
|
from typing import Union
from torch import nn
import transformers
import torch
from PIL import Image
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self):
return "CLIPModel()"
def forward(self, features):
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: Union[str, bool] = True):
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return encoding
def save(self, output_path: str):
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str):
return CLIPModel(model_name=input_path)
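# Hedged usage sketch: encode one image and one sentence with the module above.
# Downloading "openai/clip-vit-base-patch32" on first use and the existence of
# "example.jpg" are assumptions made for illustration.
if __name__ == "__main__":
    model = CLIPModel()
    inputs = model.tokenize([Image.open("example.jpg"), "a photo of a cat"])
    features = model(inputs)
    # One 512-dim embedding per input for the base patch32 checkpoint.
    print(features["sentence_embedding"].shape)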
|
from typing import Union
from torch import nn
import transformers
import torch
from PIL import Image
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self):
return "CLIPModel()"
def forward(self, features):
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: Union[str, bool] = True):
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
if len(texts_values) == 0:
texts_values = None
if len(images) == 0:
images = None
inputs = self.processor(text=texts_values, images=images, return_tensors="pt", padding=padding)
inputs["image_text_info"] = image_text_info
return inputs
def save(self, output_path: str):
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str):
return CLIPModel(model_name=input_path)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.21'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with many plot generators in parallel, since the default limit is ``ulimit -n 1024`` on Ubuntu
    and 256 on OS X El Capitan. The setting only lasts for the current Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.20'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with many plot generators in parallel, since the default limit is ``ulimit -n 1024`` on Ubuntu
    and 256 on OS X El Capitan. The setting only lasts for the current Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.prototype.tv_tensors import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import (
_FillType,
_get_fill,
_setup_fill_arg,
_setup_size,
get_bounding_boxes,
has_any,
is_pure_tensor,
query_size,
)
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
tv_tensors.Image,
is_pure_tensor,
tv_tensors.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, tv_tensors.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, tv_tensors.Mask)):
inpt = tv_tensors.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, tv_tensors.BoundingBoxes):
inpt = tv_tensors.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
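# Hedged sketch: applying the transform to a plain image tensor. Which hook names
# (`make_params`/`transform` vs. `_get_params`/`_transform`) the Transform base
# class dispatches to depends on the torchvision version, so this is illustrative.
#   img = torch.rand(3, 300, 500)
#   out = FixedSizeCrop((224, 224))(img)
#   print(out.shape)  # torch.Size([3, 224, 224])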
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.prototype.tv_tensors import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import (
_FillType,
_get_fill,
_setup_fill_arg,
_setup_size,
get_bounding_boxes,
has_any,
is_pure_tensor,
query_size,
)
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
tv_tensors.Image,
is_pure_tensor,
tv_tensors.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, tv_tensors.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, tv_tensors.Mask)):
inpt = tv_tensors.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, tv_tensors.BoundingBoxes):
inpt = tv_tensors.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
#!/usr/bin/env python
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.convert_to_parquet import ConvertToParquetCommand
from datasets.commands.delete_from_hub import DeleteFromHubCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
ConvertToParquetCommand.register_subcommand(commands_parser)
DeleteFromHubCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
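# Illustrative invocations: with the console entry point installed as
# `datasets-cli`, the registered subcommands above are reached as, e.g.:
#   datasets-cli env
#   datasets-cli test <dataset_name> --save_info --all_configs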
|
#!/usr/bin/env python
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.convert_to_parquet import ConvertToParquetCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
ConvertToParquetCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from pathlib import Path
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive, verify_str_arg
class Country211(ImageFolder):
"""`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.
This dataset was built by filtering the images from the YFCC100m dataset
    that have a GPS coordinate corresponding to an ISO-3166 country code. The
dataset is balanced by sampling 150 train images, 50 validation images, and
100 test images for each country.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/country211/``. If dataset is already downloaded, it is not downloaded again.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
_MD5 = "84988d7644798601126c29e9877aab6a"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "valid", "test"))
root = Path(root).expanduser()
self.root = str(root)
self._base_folder = root / "country211"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(str(self._base_folder / self._split), transform=transform, target_transform=target_transform)
self.root = str(root)
def _check_exists(self) -> bool:
return self._base_folder.exists() and self._base_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
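A minimal usage sketch for the Country211 class above, assuming the standard torchvision packaging exports it as torchvision.datasets.Country211; the root path and transform are illustrative choices, not part of the original file.
# Illustrative only: load the dataset and fetch one sample.
from torchvision import transforms
from torchvision.datasets import Country211
dataset = Country211(
    root="data",                    # hypothetical download/extraction directory
    split="train",                  # one of "train", "valid", "test"
    transform=transforms.ToTensor(),
    download=True,                  # fetches country211.tgz on first use
)
image, label = dataset[0]           # ImageFolder semantics: (image, class index)
print(len(dataset), image.shape, label)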
from pathlib import Path
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive, verify_str_arg
class Country211(ImageFolder):
"""`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.
This dataset was built by filtering the images from the YFCC100m dataset
    that have a GPS coordinate corresponding to an ISO-3166 country code. The
dataset is balanced by sampling 150 train images, 50 validation images, and
    100 test images for each country.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/country211/``. If dataset is already downloaded, it is not downloaded again.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
_MD5 = "84988d7644798601126c29e9877aab6a"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "valid", "test"))
root = Path(root).expanduser()
self.root = str(root)
self._base_folder = root / "country211"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(str(self._base_folder / self._split), transform=transform, target_transform=target_transform)
self.root = str(root)
def _check_exists(self) -> bool:
return self._base_folder.exists() and self._base_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result, bbox_mapping_back
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def merge_aug_results(self, aug_results, img_metas):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
Returns:
tuple: (bboxes, labels)
"""
recovered_bboxes, aug_labels = [], []
for bboxes_labels, img_info in zip(aug_results, img_metas):
img_shape = img_info[0]['img_shape'] # using shape before padding
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes, labels = bboxes_labels
bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
aug_labels.append(labels)
bboxes = torch.cat(recovered_bboxes, dim=0)
labels = torch.cat(aug_labels)
if bboxes.shape[0] > 0:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=False):
"""Augment testing of CornerNet.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
Note:
            ``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, [img_metas[ind], img_metas[flip_ind]], False, False)
aug_results.append(bbox_list[0])
aug_results.append(bbox_list[1])
bboxes, labels = self.merge_aug_results(aug_results, img_metas)
bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
return [bbox_results]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result, bbox_mapping_back
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def merge_aug_results(self, aug_results, img_metas):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
Returns:
tuple: (bboxes, labels)
"""
recovered_bboxes, aug_labels = [], []
for bboxes_labels, img_info in zip(aug_results, img_metas):
img_shape = img_info[0]['img_shape'] # using shape before padding
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes, labels = bboxes_labels
bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
aug_labels.append(labels)
bboxes = torch.cat(recovered_bboxes, dim=0)
labels = torch.cat(aug_labels)
if bboxes.shape[0] > 0:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=False):
"""Augment testing of CornerNet.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
Note:
            ``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, [img_metas[ind], img_metas[flip_ind]], False, False)
aug_results.append(bbox_list[0])
aug_results.append(bbox_list[1])
bboxes, labels = self.merge_aug_results(aug_results, img_metas)
bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
return [bbox_results]
|
"""Chain that runs an arbitrary python function."""
import functools
import logging
from collections.abc import Awaitable
from typing import Any, Callable, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from pydantic import Field
from langchain.chains.base import Chain
logger = logging.getLogger(__name__)
class TransformChain(Chain):
"""Chain that transforms the chain output.
Example:
.. code-block:: python
from langchain.chains import TransformChain
            transform_chain = TransformChain(input_variables=["text"],
                output_variables=["entities"], transform=func)
"""
input_variables: list[str]
"""The keys expected by the transform's input dictionary."""
output_variables: list[str]
"""The keys returned by the transform's output dictionary."""
transform_cb: Callable[[dict[str, str]], dict[str, str]] = Field(alias="transform")
"""The transform function."""
atransform_cb: Optional[Callable[[dict[str, Any]], Awaitable[dict[str, Any]]]] = (
Field(None, alias="atransform")
)
"""The async coroutine transform function."""
@staticmethod
@functools.lru_cache
def _log_once(msg: str) -> None:
"""Log a message once.
:meta private:
"""
logger.warning(msg)
@property
def input_keys(self) -> list[str]:
"""Expect input keys.
:meta private:
"""
return self.input_variables
@property
def output_keys(self) -> list[str]:
"""Return output keys.
:meta private:
"""
return self.output_variables
def _call(
self,
inputs: dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
return self.transform_cb(inputs)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.atransform_cb is not None:
return await self.atransform_cb(inputs)
self._log_once(
"TransformChain's atransform is not provided, falling"
" back to synchronous transform",
)
return self.transform_cb(inputs)
|
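A short, hedged usage sketch for the TransformChain above; the transform function, keys, and input values are invented for illustration.
# Illustrative only: a tiny synchronous transform wired into TransformChain.
from langchain.chains import TransformChain
def _shout(inputs: dict) -> dict:
    # keys must match the declared input_variables / output_variables
    return {"entities": inputs["text"].upper()}
chain = TransformChain(
    input_variables=["text"],
    output_variables=["entities"],
    transform=_shout,  # populates transform_cb via the "transform" alias
)
result = chain.invoke({"text": "hello world"})
print(result)  # the output mapping contains 'entities': 'HELLO WORLD'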
"""Chain that runs an arbitrary python function."""
import functools
import logging
from collections.abc import Awaitable
from typing import Any, Callable, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from pydantic import Field
from langchain.chains.base import Chain
logger = logging.getLogger(__name__)
class TransformChain(Chain):
"""Chain that transforms the chain output.
Example:
.. code-block:: python
from langchain.chains import TransformChain
            transform_chain = TransformChain(input_variables=["text"],
                output_variables=["entities"], transform=func)
"""
input_variables: list[str]
"""The keys expected by the transform's input dictionary."""
output_variables: list[str]
"""The keys returned by the transform's output dictionary."""
transform_cb: Callable[[dict[str, str]], dict[str, str]] = Field(alias="transform")
"""The transform function."""
atransform_cb: Optional[Callable[[dict[str, Any]], Awaitable[dict[str, Any]]]] = (
Field(None, alias="atransform")
)
"""The async coroutine transform function."""
@staticmethod
@functools.lru_cache
def _log_once(msg: str) -> None:
"""Log a message once.
:meta private:
"""
logger.warning(msg)
@property
def input_keys(self) -> list[str]:
"""Expect input keys.
:meta private:
"""
return self.input_variables
@property
def output_keys(self) -> list[str]:
"""Return output keys.
:meta private:
"""
return self.output_variables
def _call(
self,
inputs: dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
return self.transform_cb(inputs)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.atransform_cb is not None:
return await self.atransform_cb(inputs)
self._log_once(
"TransformChain's atransform is not provided, falling"
" back to synchronous transform"
)
return self.transform_cb(inputs)
|
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(
examples: list[dict],
llm: BaseLanguageModel,
prompt_template: PromptTemplate,
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = prompt | llm | StrOutputParser()
return chain.invoke({})
|
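A hedged sketch of calling the generate_example function above, assuming it is in scope; the example data are invented, and FakeListLLM merely stands in for any configured BaseLanguageModel so the call runs offline.
# Illustrative only: generate one more few-shot example with a fake model.
from langchain_community.llms.fake import FakeListLLM
from langchain_core.prompts.prompt import PromptTemplate
example_prompt = PromptTemplate.from_template("Question: {question}\nAnswer: {answer}")
examples = [
    {"question": "2 + 2?", "answer": "4"},
    {"question": "Capital of France?", "answer": "Paris"},
]
llm = FakeListLLM(responses=["Question: Capital of Japan?\nAnswer: Tokyo"])
print(generate_example(examples, llm, example_prompt))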
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(
examples: list[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = prompt | llm | StrOutputParser()
return chain.invoke({})
|
"""Test SparkLLM."""
from langchain_core.outputs import LLMResult
from langchain_community.llms.sparkllm import SparkLLM
def test_call() -> None:
"""Test valid call to sparkllm."""
llm = SparkLLM()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to sparkllm."""
llm = SparkLLM()
output = llm.generate(["Say foo:"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_spark_llm_with_param_alias() -> None:
"""Test SparkLLM with parameters alias."""
llm = SparkLLM(
app_id="your-app-id",
api_key="your-api-key",
api_secret="your-api-secret",
model="Spark4.0 Ultra",
api_url="your-api-url",
timeout=20,
)
assert llm.spark_app_id == "your-app-id"
assert llm.spark_api_key == "your-api-key"
assert llm.spark_api_secret == "your-api-secret"
assert llm.spark_llm_domain == "Spark4.0 Ultra"
assert llm.spark_api_url == "your-api-url"
assert llm.request_timeout == 20
def test_spark_llm_with_stream() -> None:
"""Test SparkLLM with stream."""
llm = SparkLLM()
for chunk in llm.stream("你好呀"):
assert isinstance(chunk, str)
|
"""Test SparkLLM."""
from langchain_core.outputs import LLMResult
from langchain_community.llms.sparkllm import SparkLLM
def test_call() -> None:
"""Test valid call to sparkllm."""
llm = SparkLLM()
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to sparkllm."""
llm = SparkLLM()
output = llm.generate(["Say foo:"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_spark_llm_with_param_alias() -> None:
"""Test SparkLLM with parameters alias."""
llm = SparkLLM( # type: ignore[call-arg]
app_id="your-app-id",
api_key="your-api-key",
api_secret="your-api-secret",
model="Spark4.0 Ultra",
api_url="your-api-url",
timeout=20,
)
assert llm.spark_app_id == "your-app-id"
assert llm.spark_api_key == "your-api-key"
assert llm.spark_api_secret == "your-api-secret"
assert llm.spark_llm_domain == "Spark4.0 Ultra"
assert llm.spark_api_url == "your-api-url"
assert llm.request_timeout == 20
def test_spark_llm_with_stream() -> None:
"""Test SparkLLM with stream."""
llm = SparkLLM() # type: ignore[call-arg]
for chunk in llm.stream("你好呀"):
assert isinstance(chunk, str)
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9070
Model Query Sparsity: Active Dimensions: 59.4, Sparsity Ratio: 0.9981
Model Corpus Sparsity: Active Dimensions: 61.9, Sparsity Ratio: 0.9980
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 86.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.60%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 86.00%
MRR@10: 0.6191
NDCG@10: 0.6780
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 45.4, Sparsity Ratio: 0.9985
Model Corpus Sparsity: Active Dimensions: 122.6, Sparsity Ratio: 0.9960
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 93.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 10.90%
Recall@10: 92.13%
MRR@10: 0.7815
NDCG@10: 0.8060
Model Query Sparsity: Active Dimensions: 52.4, Sparsity Ratio: 0.9983
Model Corpus Sparsity: Active Dimensions: 92.2, Sparsity Ratio: 0.9970
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8060
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Query Sparsity: Active Dimensions: 63.0, Sparsity Ratio: 0.9979
Model Corpus Sparsity: Active Dimensions: 63.4, Sparsity Ratio: 0.9979
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 48.1, Sparsity Ratio: 0.9984
Model Corpus Sparsity: Active Dimensions: 125.4, Sparsity Ratio: 0.9959
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Query Sparsity: Active Dimensions: 55.5, Sparsity Ratio: 0.9982
Model Corpus Sparsity: Active Dimensions: 94.4, Sparsity Ratio: 0.9969
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.66666793823242, 'sparsity_ratio': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# token_weights is a list with one (token, weight) list per input sentence
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.get_sparsity_stats(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats['row_sparsity_mean']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# token_weights is a list with one (token, weight) list per input sentence
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.get_sparsity_stats(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats_limited['row_sparsity_mean']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 32.0, 'row_sparsity_mean': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.random_generator = self.backend.random.SeedGenerator()
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformations=None, **kwargs):
should_apply = (
transformations
if transformations is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(should_apply, grayscale_images, images)
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
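A minimal usage sketch for the RandomGrayscale layer above, assuming a Keras 3 build that exports it as keras.layers.RandomGrayscale; the batch shape and factor are arbitrary illustrations.
# Illustrative only: apply the layer to a random batch of RGB images.
import numpy as np
from keras import layers
layer = layers.RandomGrayscale(factor=0.5)  # roughly half of the images are converted
images = np.random.uniform(0.0, 255.0, size=(8, 64, 64, 3)).astype("float32")
augmented = layer(images, training=True)
print(augmented.shape)  # (8, 64, 64, 3): the channel count is preserved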
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.random_generator = self.backend.random.SeedGenerator()
def get_random_transformation(self, images, training=True, seed=None):
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=self.random_generator,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformations=None, **kwargs):
should_apply = (
transformations
if transformations is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(should_apply, grayscale_images, images)
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
import os.path as osp
import tempfile
import unittest
from mmengine.fileio import dump
from mmdet.datasets.api_wrappers import COCOPanoptic
class TestCOCOPanoptic(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
def test_create_index(self):
ann_json = {'test': ['test', 'createIndex']}
annotation_file = osp.join(self.tmp_dir.name, 'createIndex.json')
dump(ann_json, annotation_file)
COCOPanoptic(annotation_file)
def test_load_anns(self):
categories = [{
'id': 0,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}]
images = [{
'id': 0,
'width': 80,
'height': 60,
'file_name': 'fake_name1.jpg',
}]
annotations = [{
'segments_info': [
{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [10, 10, 10, 40],
'iscrowd': 0
},
],
'file_name':
'fake_name1.png',
'image_id':
0
}]
ann_json = {
'images': images,
'annotations': annotations,
'categories': categories,
}
annotation_file = osp.join(self.tmp_dir.name, 'load_anns.json')
dump(ann_json, annotation_file)
api = COCOPanoptic(annotation_file)
api.load_anns(1)
self.assertIsNone(api.load_anns(0.1))
|
import os.path as osp
import tempfile
import unittest
import mmcv
from mmdet.datasets.api_wrappers import COCOPanoptic
class TestCOCOPanoptic(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
def test_create_index(self):
ann_json = {'test': ['test', 'createIndex']}
annotation_file = osp.join(self.tmp_dir.name, 'createIndex.json')
mmcv.dump(ann_json, annotation_file)
COCOPanoptic(annotation_file)
def test_load_anns(self):
categories = [{
'id': 0,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}]
images = [{
'id': 0,
'width': 80,
'height': 60,
'file_name': 'fake_name1.jpg',
}]
annotations = [{
'segments_info': [
{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [10, 10, 10, 40],
'iscrowd': 0
},
],
'file_name':
'fake_name1.png',
'image_id':
0
}]
ann_json = {
'images': images,
'annotations': annotations,
'categories': categories,
}
annotation_file = osp.join(self.tmp_dir.name, 'load_anns.json')
mmcv.dump(ann_json, annotation_file)
api = COCOPanoptic(annotation_file)
api.load_anns(1)
self.assertIsNone(api.load_anns(0.1))
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 4
output_dir = "output/training_ce_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderClassificationEvaluator to measure the performance on the dev set
eval_evaluator = CrossEncoderCorrelationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
scores=eval_dataset["score"],
name="stsb-validation",
)
eval_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-stsb"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=80,
save_strategy="steps",
save_steps=80,
save_total_limit=2,
logging_steps=20,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=eval_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_evaluator = CrossEncoderCorrelationEvaluator(
sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
scores=test_dataset["score"],
name="stsb-test",
)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
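A brief, hedged follow-up to the training script above, showing how the saved reranker could be used for inference; it assumes training completed and final_output_dir exists, and the sentence pairs are invented.
# Illustrative only: score unseen sentence pairs with the saved CrossEncoder.
from sentence_transformers.cross_encoder import CrossEncoder
reranker = CrossEncoder(final_output_dir)  # or the Hub id pushed above
scores = reranker.predict([
    ("A man is eating food.", "A man is eating a piece of bread."),
    ("A man is eating food.", "A plane is taking off."),
])
print(scores)  # higher score = more similar, since the model was trained on STSb similarity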
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
"""Run smoke tests"""
import os
from pathlib import Path
import torch
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
from pathlib import Path
import torch
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
if __name__ == "__main__":
main()
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDocument):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocumentArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_pandas()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes',
]
).all()
da_from_df = DocumentArray[nested_doc_cls].from_pandas(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocumentArray.from_pandas(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocumentArray[nested_doc.__class__].from_pandas(df=df)
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDocument):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: Image
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocumentArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=Image(url='aux.png'),
),
nested_doc_cls(text='hello world', image=Image()),
]
)
df = da.to_pandas()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes',
]
).all()
da_from_df = DocumentArray[nested_doc_cls].from_pandas(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[Image]
class Middle(BaseDocument):
img: Optional[Image]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[Image]
middle: Optional[Middle]
doc = Outer(img=Image(), middle=Middle(img=Image(), inner=Inner(img=Image())))
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocumentArray.from_pandas(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocumentArray[nested_doc.__class__].from_pandas(df=df)
|
import os
# When using jax.experimental.enable_x64 in unit test, we want to keep the
# default dtype with 32 bits, aligning it with Keras's default.
os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32"
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import. This should force the torch
# import to happen first for all tests.
import torch # noqa: F401
except ImportError:
torch = None
import pytest # noqa: E402
from keras.src.backend import backend # noqa: E402
def pytest_configure(config):
config.addinivalue_line(
"markers",
"requires_trainable_backend: mark test for trainable backend only",
)
def pytest_collection_modifyitems(config, items):
openvino_skipped_tests = []
if backend() == "openvino":
with open(
"keras/src/backend/openvino/excluded_concrete_tests.txt", "r"
) as file:
openvino_skipped_tests = file.readlines()
        # it is necessary to check that the stripped line is not empty
        # and exclude such lines
openvino_skipped_tests = [
line.strip() for line in openvino_skipped_tests if line.strip()
]
requires_trainable_backend = pytest.mark.skipif(
backend() in ["numpy", "openvino"],
reason="Trainer not implemented for NumPy and OpenVINO backend.",
)
for item in items:
if "requires_trainable_backend" in item.keywords:
item.add_marker(requires_trainable_backend)
# also, skip concrete tests for openvino, listed in the special file
        # this is a more granular mechanism to exclude tests rather
        # than using the --ignore option
for skipped_test in openvino_skipped_tests:
if skipped_test in item.nodeid:
item.add_marker(
skip_if_backend(
"openvino",
"Not supported operation by openvino backend",
)
)
def skip_if_backend(given_backend, reason):
return pytest.mark.skipif(backend() == given_backend, reason=reason)
|
import os
# When using jax.experimental.enable_x64 in unit test, we want to keep the
# default dtype with 32 bits, aligning it with Keras's default.
os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32"
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import. This should force the torch
# import to happen first for all tests.
import torch # noqa: F401
except ImportError:
pass
import pytest # noqa: E402
from keras.src.backend import backend # noqa: E402
def pytest_configure(config):
config.addinivalue_line(
"markers",
"requires_trainable_backend: mark test for trainable backend only",
)
def pytest_collection_modifyitems(config, items):
openvino_skipped_tests = []
if backend() == "openvino":
with open(
"keras/src/backend/openvino/excluded_concrete_tests.txt", "r"
) as file:
openvino_skipped_tests = file.readlines()
        # it is necessary to check that the stripped line is not empty
        # and exclude such lines
openvino_skipped_tests = [
line.strip() for line in openvino_skipped_tests if line.strip()
]
requires_trainable_backend = pytest.mark.skipif(
backend() in ["numpy", "openvino"],
reason="Trainer not implemented for NumPy and OpenVINO backend.",
)
for item in items:
if "requires_trainable_backend" in item.keywords:
item.add_marker(requires_trainable_backend)
# also, skip concrete tests for openvino, listed in the special file
        # this is a more granular mechanism to exclude tests rather
        # than using the --ignore option
for skipped_test in openvino_skipped_tests:
if skipped_test in item.nodeid:
item.add_marker(
skip_if_backend(
"openvino",
"Not supported operation by openvino backend",
)
)
def skip_if_backend(given_backend, reason):
return pytest.mark.skipif(backend() == given_backend, reason=reason)
|