input | output
---|---
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_notebook() -> bool:
"""
    Check whether we're running inside a Jupyter notebook, using the
    `get_ipython` function, which is only available in IPython/Jupyter.
    :return: True if running in a Jupyter notebook, else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
|
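A minimal usage sketch for the availability helpers above. The module name `framework_utils` is hypothetical; in practice the functions would be imported from wherever this snippet actually lives.

# Hedged usage sketch: `framework_utils` is a hypothetical module containing the
# helpers defined above (is_torch_available, is_tf_available, is_notebook).
from framework_utils import is_notebook, is_tf_available, is_torch_available

def pick_backend() -> str:
    # Prefer torch when both frameworks are installed, fall back to numpy.
    if is_torch_available():
        return "torch"
    if is_tf_available():
        return "tensorflow"
    return "numpy"

if is_notebook():
    print(f"Running inside Jupyter, selected backend: {pick_backend()}")
else:
    print(f"Running outside Jupyter, selected backend: {pick_backend()}")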
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.8.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.7.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
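The `__init__.py` above re-exports the public `datasets` API. Below is a minimal, hedged usage sketch of two of those entry points; it requires the `datasets` package and network access, and the dataset name is only an example.

# Usage sketch for the re-exported API above; "imdb" is just an example dataset id.
from datasets import concatenate_datasets, load_dataset

train = load_dataset("imdb", split="train[:1%]")  # small slice of the train split
test = load_dataset("imdb", split="test[:1%]")    # small slice of the test split

# Merge the two slices into a single Dataset object.
combined = concatenate_datasets([train, test])
print(combined)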
import os
from unittest.mock import patch
import pytest
from langchain_openai import OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = "foo"
def test_openai_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
OpenAIEmbeddings(model_kwargs={"model": "foo"})
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAIEmbeddings(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
def test_embed_documents_with_custom_chunk_size() -> None:
embeddings = OpenAIEmbeddings(chunk_size=2)
texts = ["text1", "text2", "text3", "text4"]
custom_chunk_size = 3
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]},
{"data": [{"embedding": [0.5, 0.6]}, {"embedding": [0.7, 0.8]}]},
]
result = embeddings.embed_documents(texts, chunk_size=custom_chunk_size)
_, tokens, __ = embeddings._tokenize(texts, custom_chunk_size)
mock_create.call_args
mock_create.assert_any_call(input=tokens[0:3], **embeddings._invocation_params)
mock_create.assert_any_call(input=tokens[3:4], **embeddings._invocation_params)
assert result == [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]
def test_embed_documents_with_custom_chunk_size_no_check_ctx_length() -> None:
embeddings = OpenAIEmbeddings(chunk_size=2, check_embedding_ctx_length=False)
texts = ["text1", "text2", "text3", "text4"]
custom_chunk_size = 3
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]},
{"data": [{"embedding": [0.5, 0.6]}, {"embedding": [0.7, 0.8]}]},
]
result = embeddings.embed_documents(texts, chunk_size=custom_chunk_size)
mock_create.call_args
mock_create.assert_any_call(input=texts[0:3], **embeddings._invocation_params)
mock_create.assert_any_call(input=texts[3:4], **embeddings._invocation_params)
assert result == [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]
|
import os
from unittest.mock import patch
import pytest
from langchain_openai import OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = "foo"
def test_openai_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
OpenAIEmbeddings(model_kwargs={"model": "foo"})
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAIEmbeddings(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
def test_embed_documents_with_custom_chunk_size() -> None:
embeddings = OpenAIEmbeddings(chunk_size=2, check_embedding_ctx_length=False)
texts = ["text1", "text2", "text3", "text4"]
custom_chunk_size = 3
with patch.object(embeddings.client, "create") as mock_create:
mock_create.side_effect = [
{"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]},
{"data": [{"embedding": [0.5, 0.6]}, {"embedding": [0.7, 0.8]}]},
]
result = embeddings.embed_documents(texts, chunk_size=custom_chunk_size)
mock_create.call_args
mock_create.assert_any_call(input=texts[0:3], **embeddings._invocation_params)
mock_create.assert_any_call(input=texts[3:4], **embeddings._invocation_params)
assert result == [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import AnchorHead
def test_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
self = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
import mmcv
import torch
from mmdet.models.dense_heads import AnchorHead
def test_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
self = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
from abc import ABC, abstractmethod
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs,
):
self._load_offset2ids()
def _init_subindices(self, *args, **kwargs):
self._subindices = {}
subindex_configs = kwargs.get('subindex_configs', None)
if subindex_configs:
config = asdict(self._config) if getattr(self, '_config', None) else dict()
for name, config_subindex in subindex_configs.items():
config_subindex = (
dict() if config_subindex is None else config_subindex
) # allow None as input
config_joined = {**config, **config_subindex}
config_joined = self._ensure_unique_config(
config, config_subindex, config_joined, name
)
self._subindices[name] = self.__class__(config=config_joined)
self._subindices[name].extend(self.traverse_flat(name[1:]))
@abstractmethod
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
"""
Ensures that the subindex configuration is unique, despite it inheriting unpopulated fields from the root config.
:param config_root: The configuration of the root index.
:param config_subindex: The configuration that was explicitly provided by the user for the subindex.
:param config_joined: The configuration that combines root and subindex configs. This is the configuration that will be used for subindex construction.
:param subindex_name: Name (access path) of the subindex
:return: config_joined that is unique compared to config_root
"""
...
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(self, columns):
if columns is None:
return []
return columns
|
from abc import ABC
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
self._load_offset2ids()
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(self, columns):
if columns is None:
return []
return columns
|
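A hypothetical sketch of how a concrete storage backend could populate `TYPE_MAP` and satisfy `_ensure_unique_config`. This is not an actual docarray backend; it only illustrates the mixin contract defined above, and `table_name` is a made-up config key.

# Hypothetical backend built on the BaseBackendMixin contract defined above.
class DummyBackendMixin(BaseBackendMixin):
    TYPE_MAP = {
        'str': TypeMap(type='TEXT', converter=str),
        'int': TypeMap(type='INTEGER', converter=int),
        'float': TypeMap(type='FLOAT', converter=float),
    }

    def _ensure_unique_config(
        self, config_root, config_subindex, config_joined, subindex_name
    ) -> dict:
        # Toy uniqueness rule: suffix a (hypothetical) table_name with the subindex
        # name when the user did not override it explicitly.
        if 'table_name' in config_joined and 'table_name' not in config_subindex:
            config_joined['table_name'] = f"{config_joined['table_name']}_{subindex_name}"
        return config_joined

backend = DummyBackendMixin()
print(backend._map_type('int'))         # -> 'INTEGER'
print(backend._map_column('7', 'int'))  # -> 7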
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
    # Specify the language of the shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/dev-2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
    # Specify the language of the shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
from qdrant_client.http.models.models import Distance
from .... import Document, DocumentArray
from ....math import ndarray
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(self, q: 'QdrantArrayType', limit=10):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=None,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self, query: 'QdrantArrayType', limit: int = 10, **kwargs
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: the input query, in an array type supported by Qdrant.
:param limit: number of retrieved items
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit)
closest_docs.append(da)
return closest_docs
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
from qdrant_openapi_client.models.models import Distance
from .... import Document, DocumentArray
from ....math import ndarray
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(self, q: 'QdrantArrayType', limit=10):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=None,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'].value[0], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self, query: 'QdrantArrayType', limit: int = 10, **kwargs
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: the input query, in an array type supported by Qdrant.
:param limit: number of retrieved items
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit)
closest_docs.append(da)
return closest_docs
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import PGEmbedding
from langchain_community.vectorstores.pgembedding import (
CollectionStore,
EmbeddingStore,
QueryResult,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CollectionStore": "langchain_community.vectorstores.pgembedding",
"EmbeddingStore": "langchain_community.vectorstores.pgembedding",
"QueryResult": "langchain_community.vectorstores.pgembedding",
"PGEmbedding": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CollectionStore",
"EmbeddingStore",
"PGEmbedding",
"QueryResult",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import PGEmbedding
from langchain_community.vectorstores.pgembedding import (
CollectionStore,
EmbeddingStore,
QueryResult,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CollectionStore": "langchain_community.vectorstores.pgembedding",
"EmbeddingStore": "langchain_community.vectorstores.pgembedding",
"QueryResult": "langchain_community.vectorstores.pgembedding",
"PGEmbedding": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CollectionStore",
"EmbeddingStore",
"QueryResult",
"PGEmbedding",
]
|
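The shim above relies on a module-level `__getattr__` (PEP 562) to resolve moved names lazily and raise deprecation warnings. Below is a generic, hedged sketch of that pattern, without langchain's `create_importer` helper; `new_home.things` is a made-up module path.

# Generic PEP 562 lazy-import shim; the target module path is hypothetical.
import importlib
import warnings
from typing import Any

_MOVED = {
    # attribute name -> module that now provides it (example mapping)
    "OldThing": "new_home.things",
}

def __getattr__(name: str) -> Any:
    if name in _MOVED:
        warnings.warn(
            f"{name} has moved to {_MOVED[name]}; import it from there instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        module = importlib.import_module(_MOVED[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")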
# Copyright (c) OpenMMLab. All rights reserved.
# yapf: disable
from .lr_scheduler import (ConstantLR, CosineAnnealingLR, CosineRestartLR,
ExponentialLR, LinearLR, MultiStepLR, OneCycleLR,
PolyLR, StepLR)
from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum,
CosineRestartMomentum, ExponentialMomentum,
LinearMomentum, MultiStepMomentum,
PolyMomentum, StepMomentum)
from .param_scheduler import (ConstantParamScheduler,
CosineAnnealingParamScheduler,
CosineRestartParamScheduler,
ExponentialParamScheduler, LinearParamScheduler,
MultiStepParamScheduler, OneCycleParamScheduler,
PolyParamScheduler, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'ConstantLR', 'CosineAnnealingLR', 'ExponentialLR', 'LinearLR',
'MultiStepLR', 'StepLR', 'ConstantMomentum', 'CosineAnnealingMomentum',
'ExponentialMomentum', 'LinearMomentum', 'MultiStepMomentum',
'StepMomentum', 'ConstantParamScheduler', 'CosineAnnealingParamScheduler',
'ExponentialParamScheduler', 'LinearParamScheduler',
'MultiStepParamScheduler', 'StepParamScheduler', '_ParamScheduler',
'PolyParamScheduler', 'PolyLR', 'PolyMomentum', 'OneCycleParamScheduler',
'OneCycleLR', 'CosineRestartParamScheduler', 'CosineRestartLR',
'CosineRestartMomentum'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR,
LinearLR, MultiStepLR, OneCycleLR, PolyLR, StepLR)
from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum,
ExponentialMomentum, LinearMomentum,
MultiStepMomentum, PolyMomentum, StepMomentum)
from .param_scheduler import (ConstantParamScheduler,
CosineAnnealingParamScheduler,
ExponentialParamScheduler, LinearParamScheduler,
MultiStepParamScheduler, OneCycleParamScheduler,
PolyParamScheduler, StepParamScheduler,
_ParamScheduler)
__all__ = [
'ConstantLR', 'CosineAnnealingLR', 'ExponentialLR', 'LinearLR',
'MultiStepLR', 'StepLR', 'ConstantMomentum', 'CosineAnnealingMomentum',
'ExponentialMomentum', 'LinearMomentum', 'MultiStepMomentum',
'StepMomentum', 'ConstantParamScheduler', 'CosineAnnealingParamScheduler',
'ExponentialParamScheduler', 'LinearParamScheduler',
'MultiStepParamScheduler', 'StepParamScheduler', '_ParamScheduler',
'PolyParamScheduler', 'PolyLR', 'PolyMomentum', 'OneCycleParamScheduler',
'OneCycleLR'
]
|
from typing import Dict, List, Optional
import re
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ContentBlock, TextBlock
class SafeFormatter:
"""Safe string formatter that does not raise KeyError if key is missing."""
def __init__(self, format_dict: Optional[Dict[str, str]] = None):
self.format_dict = format_dict or {}
def format(self, format_string: str) -> str:
return re.sub(r"\{([^{}]+)\}", self._replace_match, format_string)
def parse(self, format_string: str) -> List[str]:
return re.findall(
r"\{([a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)*)\}", format_string
)
def _replace_match(self, match: re.Match) -> str:
key = match.group(1)
return str(self.format_dict.get(key, match.group(0)))
def format_string(string_to_format: str, **kwargs: str) -> str:
"""Format a string with kwargs."""
formatter = SafeFormatter(format_dict=kwargs)
return formatter.format(string_to_format)
def format_content_blocks(
content_blocks: List[ContentBlock], **kwargs: str
) -> List[ContentBlock]:
"""Format content blocks with kwargs."""
formatter = SafeFormatter(format_dict=kwargs)
formatted_blocks: List[ContentBlock] = []
for block in content_blocks:
if isinstance(block, TextBlock):
formatted_blocks.append(TextBlock(text=formatter.format(block.text)))
else:
formatted_blocks.append(block)
return formatted_blocks
def get_template_vars(template_str: str) -> List[str]:
"""Get template variables from a template string."""
variables = []
formatter = SafeFormatter()
for variable_name in formatter.parse(template_str):
if variable_name:
variables.append(variable_name)
return variables
def is_chat_model(llm: BaseLLM) -> bool:
return llm.metadata.is_chat_model
|
from typing import Dict, List, Optional
import re
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ContentBlock, TextBlock
class SafeFormatter:
"""Safe string formatter that does not raise KeyError if key is missing."""
def __init__(self, format_dict: Optional[Dict[str, str]] = None):
self.format_dict = format_dict or {}
def format(self, format_string: str) -> str:
return re.sub(r"\{([^{}]+)\}", self._replace_match, format_string)
def parse(self, format_string: str) -> List[str]:
return re.findall(r"\{([^{}]+)\}", format_string)
def _replace_match(self, match: re.Match) -> str:
key = match.group(1)
return str(self.format_dict.get(key, match.group(0)))
def format_string(string_to_format: str, **kwargs: str) -> str:
"""Format a string with kwargs."""
formatter = SafeFormatter(format_dict=kwargs)
return formatter.format(string_to_format)
def format_content_blocks(
content_blocks: List[ContentBlock], **kwargs: str
) -> List[ContentBlock]:
"""Format content blocks with kwargs."""
formatter = SafeFormatter(format_dict=kwargs)
formatted_blocks: List[ContentBlock] = []
for block in content_blocks:
if isinstance(block, TextBlock):
formatted_blocks.append(TextBlock(text=formatter.format(block.text)))
else:
formatted_blocks.append(block)
return formatted_blocks
def get_template_vars(template_str: str) -> List[str]:
"""Get template variables from a template string."""
variables = []
formatter = SafeFormatter()
for variable_name in formatter.parse(template_str):
if variable_name:
variables.append(variable_name)
return variables
def is_chat_model(llm: BaseLLM) -> bool:
return llm.metadata.is_chat_model
|
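A quick usage sketch for the helpers above: unlike `str.format()`, placeholders with no matching key are left in place instead of raising `KeyError`.

# Usage sketch for the SafeFormatter-based helpers defined above.
template = "Hello {name}, your order {order_id} is {status}."

print(get_template_vars(template))
# -> ['name', 'order_id', 'status']

print(format_string(template, name="Ada", status="shipped"))
# -> Hello Ada, your order {order_id} is shipped.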
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.random.random import beta as beta
from keras.src.random.random import binomial as binomial
from keras.src.random.random import categorical as categorical
from keras.src.random.random import dropout as dropout
from keras.src.random.random import gamma as gamma
from keras.src.random.random import normal as normal
from keras.src.random.random import randint as randint
from keras.src.random.random import shuffle as shuffle
from keras.src.random.random import truncated_normal as truncated_normal
from keras.src.random.random import uniform as uniform
from keras.src.random.seed_generator import SeedGenerator as SeedGenerator
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.random.random import beta
from keras.src.random.random import binomial
from keras.src.random.random import categorical
from keras.src.random.random import dropout
from keras.src.random.random import gamma
from keras.src.random.random import normal
from keras.src.random.random import randint
from keras.src.random.random import shuffle
from keras.src.random.random import truncated_normal
from keras.src.random.random import uniform
from keras.src.random.seed_generator import SeedGenerator
|
import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
from torchaudio.prototype.pipelines import HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytest.mark.parametrize(
"bundle,task,channel,expected_score",
[
[CONVTASNET_BASE_LIBRI2MIX, "speech_separation", 1, 8.1373],
[HDEMUCS_HIGH_MUSDB_PLUS, "music_separation", 2, 8.7480],
[HDEMUCS_HIGH_MUSDB, "music_separation", 2, 8.0697],
],
)
def test_source_separation_models(bundle, task, channel, expected_score, mixture_source, clean_sources):
"""Integration test for the source separation pipeline.
Given the mixture waveform with dimensions `(batch, channel, time)`, the pre-trained pipeline generates
the separated sources Tensor with dimensions `(batch, num_sources, time)`.
    The test computes the scale-invariant signal-to-distortion ratio (Si-SDR) score in decibels (dB).
    The Si-SDR score should be equal to or larger than the expected score.
"""
model = bundle.get_model()
mixture_waveform, sample_rate = torchaudio.load(mixture_source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms = []
for source in clean_sources:
clean_waveform, sample_rate = torchaudio.load(source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms.append(clean_waveform)
mixture_waveform = mixture_waveform.reshape(1, channel, -1)
estimated_sources = model(mixture_waveform)
clean_waveforms = torch.cat(clean_waveforms).unsqueeze(0)
estimated_sources = estimated_sources.reshape(1, -1, clean_waveforms.shape[-1])
sdr_values = sdr(estimated_sources, clean_waveforms).mean()
assert sdr_values >= expected_score
|
import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytest.mark.parametrize(
"bundle,task,channel,expected_score",
[
[CONVTASNET_BASE_LIBRI2MIX, "speech_separation", 1, 8.1373],
[HDEMUCS_HIGH_MUSDB_PLUS, "music_separation", 2, 8.7480],
[HDEMUCS_HIGH_MUSDB, "music_separation", 2, 8.0697],
],
)
def test_source_separation_models(bundle, task, channel, expected_score, mixture_source, clean_sources):
"""Integration test for the source separation pipeline.
Given the mixture waveform with dimensions `(batch, channel, time)`, the pre-trained pipeline generates
the separated sources Tensor with dimensions `(batch, num_sources, time)`.
    The test computes the scale-invariant signal-to-distortion ratio (Si-SDR) score in decibels (dB).
    The Si-SDR score should be equal to or larger than the expected score.
"""
model = bundle.get_model()
mixture_waveform, sample_rate = torchaudio.load(mixture_source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms = []
for source in clean_sources:
clean_waveform, sample_rate = torchaudio.load(source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms.append(clean_waveform)
mixture_waveform = mixture_waveform.reshape(1, channel, -1)
estimated_sources = model(mixture_waveform)
clean_waveforms = torch.cat(clean_waveforms).unsqueeze(0)
estimated_sources = estimated_sources.reshape(1, -1, clean_waveforms.shape[-1])
sdr_values = sdr(estimated_sources, clean_waveforms).mean()
assert sdr_values >= expected_score
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.writer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = BaseDataElement(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = BaseDataElement(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = BaseDataElement(
metainfo=dict(
scale=(15, 15), ori_height=5, ori_width=5, img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = BaseDataElement(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = BaseDataElement(
metainfo=dict(ori_height=15, ori_width=15, img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.writer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = [
BaseDataElement(
metainfo=dict(
scale=(15, 15),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = [
BaseDataElement(
metainfo=dict(ori_height=15, ori_width=15,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = next(iter(outputs.keys()))
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'."
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = list(outputs.keys())[0]
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'."
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
|
from __future__ import annotations
import json
import os
from typing import Callable
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
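A minimal usage sketch for the `Dense` module above, run on a fake batch of sentence embeddings; the dimensions are examples only.

# Usage sketch: project 768-dim sentence embeddings down to 256 dims with Tanh.
import torch

dense = Dense(in_features=768, out_features=256)  # Tanh activation by default
features = {"sentence_embedding": torch.randn(4, 768)}  # fake batch of 4 embeddings

out = dense(features)
print(out["sentence_embedding"].shape)            # torch.Size([4, 256])
print(dense.get_sentence_embedding_dimension())   # 256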
# Owner(s): ["module: dynamo"]
from torch._dynamo.metrics_context import MetricsContext, TopN
from torch._dynamo.test_case import run_tests, TestCase
class TestMetricsContext(TestCase):
def setUp(self):
super().setUp()
self.metrics = {}
def _on_exit(self, start_ns, end_ns, metrics, exc_type, exc_value):
# Save away the metrics to be validated in the test.
self.metrics = metrics.copy()
def test_context_exists(self):
"""
Setting a value without entering the context should raise.
"""
context = MetricsContext(self._on_exit)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
context.increment("m", 1)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
context.set("m", 1)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
            context.update({"m": 1})
def test_nested_context(self):
"""
Only the outermost context should get an on_exit call, and it should
include everything.
"""
context = MetricsContext(self._on_exit)
with context:
with context:
context.set("m1", 1)
self.assertEqual(self.metrics, {})
context.set("m2", 2)
self.assertEqual(self.metrics, {"m1": 1, "m2": 2})
def test_set(self):
"""
Validate various ways to set metrics.
"""
with MetricsContext(self._on_exit) as context:
context.set("m1", 1)
context.set("m2", 2)
context.update({"m3": 3, "m4": 4})
self.assertEqual(self.metrics, {"m1": 1, "m2": 2, "m3": 3, "m4": 4})
def test_set_disallow_overwrite(self):
"""
Validate set won't overwrite.
"""
with MetricsContext(self._on_exit) as context:
context.set("m1", 1)
with self.assertRaisesRegex(RuntimeError, "already been set"):
context.set("m1", 2)
self.assertEqual(self.metrics, {"m1": 1})
def test_update_disallow_overwrite(self):
"""
Validate update won't overwrite.
"""
with MetricsContext(self._on_exit) as context:
context.update({"m1": 1, "m2": 2})
with self.assertRaisesRegex(RuntimeError, "already been set"):
context.update({"m1": 7, "m3": 3})
def test_update_allow_overwrite(self):
"""
Validate update will overwrite when given the overwrite param.
"""
with MetricsContext(self._on_exit) as context:
context.update({"m1": 1, "m2": 2})
context.update({"m1": 7, "m3": 3}, overwrite=True)
self.assertEqual(self.metrics, {"m1": 7, "m2": 2, "m3": 3})
def test_add_to_set(self):
"""
Validate add_to_set.
"""
with MetricsContext(self._on_exit) as context:
context.add_to_set("m1", 1)
context.add_to_set("m1", 2)
context.add_to_set("m2", 3)
context.add_to_set("m2", 4)
self.assertEqual(self.metrics, {"m1": {1, 2}, "m2": {3, 4}})
self.assertTrue(isinstance(self.metrics["m1"], set))
self.assertTrue(isinstance(self.metrics["m2"], set))
def test_set_key_value(self):
with MetricsContext(self._on_exit) as context:
context.set_key_value("feature_usage", "k", True)
# Overrides allowed
context.set_key_value("feature_usage", "k2", True)
context.set_key_value("feature_usage", "k2", False)
self.assertEqual(self.metrics, {"feature_usage": {"k": True, "k2": False}})
def test_top_n(self):
top_n = TopN(3)
for k, v in (("seven", 7), ("four", 4), ("five", 5), ("six", 6), ("eight", 8)):
top_n.add(k, v)
self.assertEqual(len(top_n), 3)
print(list(top_n))
self.assertEqual(list(top_n), [("eight", 8), ("seven", 7), ("six", 6)])
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["module: dynamo"]
from torch._dynamo.metrics_context import MetricsContext, TopN
from torch._dynamo.test_case import run_tests, TestCase
class TestMetricsContext(TestCase):
def setUp(self):
super().setUp()
self.metrics = {}
def _on_exit(self, start_ns, end_ns, metrics, exc_type, exc_value):
# Save away the metrics to be validated in the test.
self.metrics = metrics.copy()
def test_context_exists(self):
"""
Setting a value without entering the context should raise.
"""
context = MetricsContext(self._on_exit)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
context.increment("m", 1)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
context.set("m", 1)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
context.update({"m", 1})
def test_nested_context(self):
"""
Only the outermost context should get an on_exit call, and it should
include everything.
"""
context = MetricsContext(self._on_exit)
with context:
with context:
context.set("m1", 1)
self.assertEqual(self.metrics, {})
context.set("m2", 2)
self.assertEqual(self.metrics, {"m1": 1, "m2": 2})
def test_set(self):
"""
Validate various ways to set metrics.
"""
with MetricsContext(self._on_exit) as context:
context.set("m1", 1)
context.set("m2", 2)
context.update({"m3": 3, "m4": 4})
self.assertEqual(self.metrics, {"m1": 1, "m2": 2, "m3": 3, "m4": 4})
def test_set_disallow_overwrite(self):
"""
Validate set won't overwrite.
"""
with MetricsContext(self._on_exit) as context:
context.set("m1", 1)
with self.assertRaisesRegex(RuntimeError, "already been set"):
context.set("m1", 2)
self.assertEqual(self.metrics, {"m1": 1})
def test_update_disallow_overwrite(self):
"""
Validate update won't overwrite.
"""
with MetricsContext(self._on_exit) as context:
context.update({"m1": 1, "m2": 2})
with self.assertRaisesRegex(RuntimeError, "already been set"):
context.update({"m1": 7, "m3": 3})
def test_update_allow_overwrite(self):
"""
Validate update will overwrite when given the overwrite param.
"""
with MetricsContext(self._on_exit) as context:
context.update({"m1": 1, "m2": 2})
context.update({"m1": 7, "m3": 3}, overwrite=True)
self.assertEqual(self.metrics, {"m1": 7, "m2": 2, "m3": 3})
def test_add_to_set(self):
"""
Validate add_to_set.
"""
with MetricsContext(self._on_exit) as context:
context.add_to_set("m1", 1)
context.add_to_set("m1", 2)
context.add_to_set("m2", 3)
context.add_to_set("m2", 4)
self.assertEqual(self.metrics, {"m1": {1, 2}, "m2": {3, 4}})
self.assertTrue(isinstance(self.metrics["m1"], set))
self.assertTrue(isinstance(self.metrics["m2"], set))
def test_set_key_value(self):
with MetricsContext(self._on_exit) as context:
context.set_key_value("feature_usage", "k", True)
# Overrides allowed
context.set_key_value("feature_usage", "k2", True)
context.set_key_value("feature_usage", "k2", False)
self.assertEqual(self.metrics, {"feature_usage": {"k": True, "k2": False}})
def test_top_n(self):
top_n = TopN(3)
for k, v in (("seven", 7), ("four", 4), ("five", 5), ("six", 6), ("eight", 8)):
top_n.add(k, v)
self.assertEqual(len(top_n), 3)
print(list(top_n))
self.assertEqual(list(top_n), [("eight", 8), ("seven", 7), ("six", 6)])
if __name__ == "__main__":
run_tests()
|
"""Chat generation output classes."""
from __future__ import annotations
from typing import Literal, Union
from pydantic import computed_field
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
class ChatGeneration(Generation):
"""A single chat generation output.
A subclass of Generation that represents the response from a chat model
that generates chat messages.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.
Users working with chat models will usually access information via either
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks).
"""
message: BaseMessage
"""The message output by the chat model."""
type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@computed_field # type: ignore[prop-decorator]
@property
def text(self) -> str:
"""Set the text attribute to be the contents of the message."""
text_ = ""
if isinstance(self.message.content, str):
text_ = self.message.content
# Assumes text in content blocks in OpenAI format.
# Uses first text block.
elif isinstance(self.message.content, list):
for block in self.message.content:
if isinstance(block, str):
text_ = block
break
if isinstance(block, dict) and "text" in block:
text_ = block["text"]
break
return text_
class ChatGenerationChunk(ChatGeneration):
"""ChatGeneration chunk.
ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def __add__(
self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
) -> ChatGenerationChunk:
"""Concatenate two ChatGenerationChunks.
Args:
other: The other ChatGenerationChunk or list of ChatGenerationChunks to
concatenate.
"""
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
if isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
def merge_chat_generation_chunks(
chunks: list[ChatGenerationChunk],
) -> Union[ChatGenerationChunk, None]:
"""Merge a list of ChatGenerationChunks into a single ChatGenerationChunk."""
if not chunks:
return None
if len(chunks) == 1:
return chunks[0]
return chunks[0] + chunks[1:]
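# Usage sketch (illustrative; assumes an AIMessage whose content is a list of
# OpenAI-style blocks, so the computed `text` field picks the first text block):
#
#     from langchain_core.messages import AIMessage
#     gen = ChatGeneration(message=AIMessage(content=[{"type": "text", "text": "hi"}]))
#     gen.text  # -> "hi"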
|
"""Chat generation output classes."""
from __future__ import annotations
from typing import Literal, Union
from pydantic import model_validator
from typing_extensions import Self
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
class ChatGeneration(Generation):
"""A single chat generation output.
A subclass of Generation that represents the response from a chat model
that generates chat messages.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.
Users working with chat models will usually access information via either
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks).
"""
text: str = ""
"""*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
message: BaseMessage
"""The message output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@model_validator(mode="after")
def set_text(self) -> Self:
"""Set the text attribute to be the contents of the message.
Returns:
    The object with the text attribute set.
Raises:
ValueError: If the message is not a string or a list.
"""
try:
text = ""
if isinstance(self.message.content, str):
text = self.message.content
# Assumes text in content blocks in OpenAI format.
# Uses first text block.
elif isinstance(self.message.content, list):
for block in self.message.content:
if isinstance(block, str):
text = block
break
if isinstance(block, dict) and "text" in block:
text = block["text"]
break
else:
pass
self.text = text
except (KeyError, AttributeError) as e:
msg = "Error while initializing ChatGeneration"
raise ValueError(msg) from e
return self
class ChatGenerationChunk(ChatGeneration):
"""ChatGeneration chunk.
ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def __add__(
self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
) -> ChatGenerationChunk:
"""Concatenate two ChatGenerationChunks.
Args:
other: The other ChatGenerationChunk or list of ChatGenerationChunks to
concatenate.
"""
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
if isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
def merge_chat_generation_chunks(
chunks: list[ChatGenerationChunk],
) -> Union[ChatGenerationChunk, None]:
"""Merge a list of ChatGenerationChunks into a single ChatGenerationChunk."""
if not chunks:
return None
if len(chunks) == 1:
return chunks[0]
return chunks[0] + chunks[1:]
|
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
teacher_config (:obj:`ConfigDict` | dict | str | Path): Config file
path or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
Defaults to None.
eval_teacher (bool): Set the train mode for teacher.
Defaults to True.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
"""
def __init__(
self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_config: Union[ConfigType, str, Path],
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = Config.fromfile(teacher_config)
self.teacher_model = MODELS.build(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(batch_inputs)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.loss(x, out_teacher, batch_data_samples)
return losses
def cuda(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
to move the teacher model to CUDA explicitly when calling the ``cuda`` function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def to(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
to move the teacher model to the target device explicitly when calling
the ``to`` function."""
self.teacher_model.to(device=device)
return super().to(device=device)
def train(self, mode: bool = True) -> None:
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name: str, value: Any) -> None:
"""Set attribute, i.e. self.name = value
This override prevents the teacher model from being registered as an
nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
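# Note (illustrative): because ``teacher_model`` is attached via
# ``object.__setattr__`` above, a built detector's ``parameters()`` and
# ``state_dict()`` only cover the student; the teacher weights come solely from
# ``teacher_ckpt`` and are never updated by the optimizer.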
|
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (:obj:`ConfigDict` | dict | str | Path): Config file
path or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(
self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_config: Union[ConfigType, str, Path],
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = Config.fromfile(teacher_config)
self.teacher_model = MODELS.build(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self, batch_inputs: Tensor,
batch_data_samples: SampleList, **kwargs) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(batch_inputs)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher,
batch_data_samples)
return losses
def cuda(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
to move the teacher model to CUDA explicitly when calling the ``cuda`` function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def to(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
to move the teacher model to the target device explicitly when calling
the ``to`` function."""
self.teacher_model.to(device=device)
return super().to(device=device)
def train(self, mode: bool = True) -> None:
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name: str, value: Any) -> None:
"""Set attribute, i.e. self.name = value
This override prevents the teacher model from being registered as an
nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
|
from typing import Any, TYPE_CHECKING
import torch
from torch._C import DispatchKey
from torch._higher_order_ops.utils import autograd_not_implemented
from torch._ops import HigherOrderOperator
from torch._subclasses.fake_tensor import FakeTensorMode
if TYPE_CHECKING:
from torch._subclasses.functional_tensor import BaseFunctionalizeAPI
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree
from torch.utils import _pytree as pytree
class RunConstGraph(HigherOrderOperator):
def __init__(self) -> None:
super().__init__("run_const_graph")
def __call__(self, graph: torch.fx.GraphModule, args: tuple[object, ...]) -> object:
return super().__call__(graph, args)
run_const_graph = RunConstGraph()
@run_const_graph.py_impl(ProxyTorchDispatchMode)
def run_const_graph_dispatch_mode(
mode: ProxyTorchDispatchMode, graph: torch.fx.GraphModule, args: tuple[object, ...]
) -> object:
const_gm, weights = graph, args
p_args = pytree.tree_map(mode.tracer.unwrap_proxy, (graph, args)) # type: ignore[union-attr]
assert isinstance(const_gm, torch.fx.GraphModule)
assert not hasattr(mode.tracer.root, "_const_graph") # type: ignore[union-attr]
mode.tracer.root.register_module("_const_graph", const_gm) # type: ignore[union-attr]
proxy = mode.tracer.create_proxy("call_function", run_const_graph, p_args, {})
out = const_gm(*weights)
return track_tensor_tree(out, proxy, constant=None, tracer=mode.tracer)
@run_const_graph.py_functionalize_impl
def run_const_graph_functional(
ctx: "BaseFunctionalizeAPI", graph: torch.fx.GraphModule, args: tuple[Any, ...]
) -> Any:
unwrapped_args = ctx.unwrap_tensors(args)
with ctx.redispatch_to_next():
out = run_const_graph(graph, unwrapped_args)
return ctx.wrap_tensors(out) # type: ignore[arg-type]
run_const_graph.py_autograd_impl(
autograd_not_implemented(run_const_graph, deferred_error=True)
)
@run_const_graph.py_impl(FakeTensorMode)
def run_const_graph_fake_tensor_mode(
mode: FakeTensorMode, graph: torch.fx.GraphModule, args: tuple[object, ...]
) -> object:
assert isinstance(graph, torch.fx.GraphModule)
with mode:
return graph(*args)
@run_const_graph.py_impl(DispatchKey.CPU)
def run_const_graph_cpu(
graph: torch.fx.GraphModule, args: tuple[object, ...]
) -> object:
assert isinstance(graph, torch.fx.GraphModule)
return graph(*args)
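# Usage sketch (illustrative, not exercised in this module): outside of tracing,
# the CPU implementation simply replays the stored GraphModule on the given
# weights, e.g.
#
#     def _double(x):
#         return x * 2
#     gm = torch.fx.symbolic_trace(_double)
#     run_const_graph_cpu(gm, (torch.ones(3),))  # tensor([2., 2., 2.])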
|
# mypy: allow-untyped-defs
import torch
from torch._C import DispatchKey
from torch._higher_order_ops.utils import autograd_not_implemented
from torch._ops import HigherOrderOperator
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree
from torch.utils import _pytree as pytree
class RunConstGraph(HigherOrderOperator):
def __init__(self):
super().__init__("run_const_graph")
def __call__(self, graph, args):
return super().__call__(graph, args)
run_const_graph = RunConstGraph()
@run_const_graph.py_impl(ProxyTorchDispatchMode)
def run_const_graph_dispatch_mode(mode, graph, args):
const_gm, weights = graph, args
p_args = pytree.tree_map(mode.tracer.unwrap_proxy, (graph, args))
assert isinstance(const_gm, torch.fx.GraphModule)
assert not hasattr(mode.tracer.root, "_const_graph")
mode.tracer.root.register_module("_const_graph", const_gm)
proxy = mode.tracer.create_proxy("call_function", run_const_graph, p_args, {})
out = const_gm(*weights)
return track_tensor_tree(out, proxy, constant=None, tracer=mode.tracer)
@run_const_graph.py_functionalize_impl
def run_const_graph_functional(ctx, graph, args):
unwrapped_args = ctx.unwrap_tensors(args)
with ctx.redispatch_to_next():
out = run_const_graph(*unwrapped_args)
return ctx.wrap_tensors(out)
run_const_graph.py_autograd_impl(
autograd_not_implemented(run_const_graph, deferred_error=True)
)
@run_const_graph.py_impl(FakeTensorMode)
def run_const_graph_fake_tensor_mode(mode, graph, args):
assert isinstance(graph, torch.fx.GraphModule)
with mode:
return graph(*args)
@run_const_graph.py_impl(DispatchKey.CPU)
def run_const_graph_cpu(graph, args):
assert isinstance(graph, torch.fx.GraphModule)
return graph(*args)
|
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
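# For reference (not asserted above): splitting 17 examples across 3 ranks
# produces per-rank datasets of sizes 6, 6 and 5.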
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_distributed_shuffle_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 2
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
ds_rank0 = split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle(seed=42)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle()
ds_rank0 = split_dataset_by_node(full_ds.shuffle(seed=42), rank=0, world_size=world_size)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "launch_torch_distributed.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
(2, 2),  # each node has 2 shards and each worker has 1 shard
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "launch_torch_distributed.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
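# For reference, the subprocess assembled in the tests above is roughly
# equivalent to a manual launch such as (port and paths illustrative):
#     python -m torch.distributed.launch --nproc_per_node=2 \
#         --master_port=<free port> distributed_scripts/launch_torch_distributed.py \
#         --streaming=True --num_workers=2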
|
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_distributed_shuffle_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 2
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
ds_rank0 = split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle(seed=42)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle()
ds_rank0 = split_dataset_by_node(full_ds.shuffle(seed=42), rank=0, world_size=world_size)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
(2, 2),  # each node has 2 shards and each worker has 1 shard
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatMistralAI
|
"""Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatMistralAI
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Audio
from docarray.typing import AudioUrl
from docarray.typing.tensor.audio import AudioNdArray, AudioTorchTensor
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor import TensorFlowTensor
from docarray.typing.tensor.audio import AudioTensorFlowTensor
LOCAL_AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_audio(file_url):
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_save_audio_ndarray(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
assert isinstance(audio.tensor, AudioNdArray)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = audio_from_file.url.load()
assert np.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_save_audio_torch_tensor(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = parse_obj_as(AudioTorchTensor, torch.from_numpy(audio.url.load()))
assert isinstance(audio.tensor, torch.Tensor)
assert isinstance(audio.tensor, AudioTorchTensor)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = parse_obj_as(
AudioTorchTensor, torch.from_numpy(audio_from_file.url.load())
)
assert torch.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_save_audio_tensorflow(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = AudioTensorFlowTensor(tensor=tf.constant(audio.url.load()))
assert isinstance(audio.tensor, TensorFlowTensor)
assert isinstance(audio.tensor, AudioTensorFlowTensor)
assert isinstance(audio.tensor.tensor, tf.Tensor)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = AudioTensorFlowTensor(
tensor=tf.constant(audio_from_file.url.load())
)
assert tnp.allclose(audio.tensor.tensor, audio_from_file.tensor.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
LOCAL_AUDIO_FILES,
)
def test_extend_audio(file_url):
class MyAudio(Audio):
title: str
tensor: Optional[AudioNdArray]
my_audio = MyAudio(title='my extended audio', url=file_url)
my_audio.tensor = parse_obj_as(AudioNdArray, my_audio.url.load())
assert isinstance(my_audio.tensor, AudioNdArray)
assert isinstance(my_audio.url, AudioUrl)
def test_audio_np():
audio = parse_obj_as(Audio, np.zeros((10, 10, 3)))
assert (audio.tensor == np.zeros((10, 10, 3))).all()
def test_audio_torch():
audio = parse_obj_as(Audio, torch.zeros(10, 10, 3))
assert (audio.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_audio_tensorflow():
audio = parse_obj_as(Audio, tf.zeros((10, 10, 3)))
assert tnp.allclose(audio.tensor.tensor, tf.zeros((10, 10, 3)))
def test_audio_bytes():
audio = parse_obj_as(Audio, torch.zeros(10, 10, 3))
audio.bytes = audio.tensor.to_bytes()
def test_audio_shortcut_doc():
class MyDoc(BaseDocument):
audio: Audio
audio2: Audio
audio3: Audio
doc = MyDoc(
audio='http://myurl.wav',
audio2=np.zeros((10, 10, 3)),
audio3=torch.zeros(10, 10, 3),
)
assert doc.audio.url == 'http://myurl.wav'
assert (doc.audio2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.audio3.tensor == torch.zeros(10, 10, 3)).all()
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Audio
from docarray.typing import AudioUrl
from docarray.typing.tensor.audio import AudioNdArray, AudioTorchTensor
from tests import TOYDATA_DIR
LOCAL_AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_audio(file_url):
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_save_audio_ndarray(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = audio.url.load()
assert isinstance(audio.tensor, np.ndarray)
assert isinstance(audio.tensor, AudioNdArray)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = audio_from_file.url.load()
assert np.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', LOCAL_AUDIO_FILES)
def test_save_audio_torch_tensor(file_url, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio = Audio(url=file_url)
audio.tensor = parse_obj_as(AudioTorchTensor, torch.from_numpy(audio.url.load()))
assert isinstance(audio.tensor, torch.Tensor)
assert isinstance(audio.tensor, AudioTorchTensor)
audio.tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
audio_from_file = Audio(url=tmp_file)
audio_from_file.tensor = parse_obj_as(
AudioTorchTensor, torch.from_numpy(audio_from_file.url.load())
)
assert torch.allclose(audio.tensor, audio_from_file.tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
LOCAL_AUDIO_FILES,
)
def test_extend_audio(file_url):
class MyAudio(Audio):
title: str
tensor: Optional[AudioNdArray]
my_audio = MyAudio(title='my extended audio', url=file_url)
my_audio.tensor = parse_obj_as(AudioNdArray, my_audio.url.load())
assert isinstance(my_audio.tensor, AudioNdArray)
assert isinstance(my_audio.url, AudioUrl)
def test_audio_np():
audio = parse_obj_as(Audio, np.zeros((10, 10, 3)))
assert (audio.tensor == np.zeros((10, 10, 3))).all()
def test_audio_torch():
audio = parse_obj_as(Audio, torch.zeros(10, 10, 3))
assert (audio.tensor == torch.zeros(10, 10, 3)).all()
def test_audio_bytes():
audio = parse_obj_as(Audio, torch.zeros(10, 10, 3))
audio.bytes = audio.tensor.to_bytes()
def test_audio_shortcut_doc():
class MyDoc(BaseDocument):
audio: Audio
audio2: Audio
audio3: Audio
doc = MyDoc(
audio='http://myurl.wav',
audio2=np.zeros((10, 10, 3)),
audio3=torch.zeros(10, 10, 3),
)
assert doc.audio.url == 'http://myurl.wav'
assert (doc.audio2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.audio3.tensor == torch.zeros(10, 10, 3)).all()
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
from .utils import _check_cuda_version, _init_dll_path, _init_ffmpeg, _init_sox, _LazyImporter, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# The builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"lazy_import_sox_ext",
"lazy_import_ffmpeg_ext",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
_SOX_EXT = None
def lazy_import_sox_ext():
"""Load SoX integration based on availability in lazy manner"""
global _SOX_EXT
if _SOX_EXT is None:
_SOX_EXT = _LazyImporter("_torchaudio_sox", _init_sox)
return _SOX_EXT
_FFMPEG_EXT = None
def lazy_import_ffmpeg_ext():
"""Load FFmpeg integration based on availability in lazy manner"""
global _FFMPEG_EXT
if _FFMPEG_EXT is None:
_FFMPEG_EXT = _LazyImporter("_torchaudio_ffmpeg", _init_ffmpeg)
return _FFMPEG_EXT
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
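# Usage sketch (illustrative): both helpers return a lazy, module-like proxy;
# the real extension is only initialized on first attribute access, e.g.
#
#     ffmpeg_ext = lazy_import_ffmpeg_ext()
#     ffmpeg_ext.get_versions()  # hypothetical attribute access; init happens here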
|
import logging
import os
import sys
from torchaudio._internal.module_utils import eval_env, fail_with_message, is_module_available, no_op
from .utils import (
_check_cuda_version,
_fail_since_no_sox,
_init_dll_path,
_init_ffmpeg,
_init_sox,
_LazyImporter,
_load_lib,
)
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# The builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"lazy_import_ffmpeg_ext",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Initialize libsox-related features
_SOX_INITIALIZED = False
_USE_SOX = False if os.name == "nt" else eval_env("TORCHAUDIO_USE_SOX", True)
_SOX_MODULE_AVAILABLE = is_module_available("torchaudio.lib._torchaudio_sox")
if _USE_SOX and _SOX_MODULE_AVAILABLE:
try:
_init_sox()
_SOX_INITIALIZED = True
except Exception:
# The initialization of sox extension will fail if supported sox
# libraries are not found in the system.
# Since the rest of the torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize sox extension", exc_info=True)
if os.name == "nt":
fail_if_no_sox = fail_with_message("requires sox extension, which is not supported on Windows.")
elif not _USE_SOX:
fail_if_no_sox = fail_with_message("requires sox extension, but it is disabled. (TORCHAUDIO_USE_SOX=0)")
elif not _SOX_MODULE_AVAILABLE:
fail_if_no_sox = fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. "
"Please build TorchAudio with libsox support. (BUILD_SOX=1)"
)
else:
fail_if_no_sox = no_op if _SOX_INITIALIZED else _fail_since_no_sox
_FFMPEG_EXT = None
def lazy_import_ffmpeg_ext():
"""Load FFmpeg integration based on availability in lazy manner"""
global _FFMPEG_EXT
if _FFMPEG_EXT is None:
_FFMPEG_EXT = _LazyImporter("_torchaudio_ffmpeg", _init_ffmpeg)
return _FFMPEG_EXT
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
|
from docarray.score.mixins.property import PropertyMixin
from docarray.score.mixins.representer import RepresentMixin
class AllMixins(RepresentMixin, PropertyMixin):
...
|
from .property import PropertyMixin
from .representer import RepresentMixin
class AllMixins(RepresentMixin, PropertyMixin):
...
|
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class JinaChunkingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to chunk")
credentials: JinaCredentialsInput = JinaCredentialsField()
max_chunk_length: int = SchemaField(
description="Maximum length of each chunk", default=1000
)
return_tokens: bool = SchemaField(
description="Whether to return token information", default=False
)
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk", optional=True
)
def __init__(self):
super().__init__(
id="806fb15e-830f-4796-8692-557d300ff43c",
description="Chunks texts using Jina AI's segmentation service",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=JinaChunkingBlock.Input,
output_schema=JinaChunkingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
all_chunks = []
all_tokens = []
for text in input_data.texts:
data = {
"content": text,
"return_tokens": str(input_data.return_tokens).lower(),
"return_chunks": "true",
"max_chunk_length": str(input_data.max_chunk_length),
}
response = requests.post(url, headers=headers, json=data)
result = response.json()
all_chunks.extend(result.get("chunks", []))
if input_data.return_tokens:
all_tokens.extend(result.get("tokens", []))
yield "chunks", all_chunks
if input_data.return_tokens:
yield "tokens", all_tokens
|
import requests
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class JinaChunkingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to chunk")
credentials: JinaCredentialsInput = JinaCredentialsField()
max_chunk_length: int = SchemaField(
description="Maximum length of each chunk", default=1000
)
return_tokens: bool = SchemaField(
description="Whether to return token information", default=False
)
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk", optional=True
)
def __init__(self):
super().__init__(
id="806fb15e-830f-4796-8692-557d300ff43c",
description="Chunks texts using Jina AI's segmentation service",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=JinaChunkingBlock.Input,
output_schema=JinaChunkingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
all_chunks = []
all_tokens = []
for text in input_data.texts:
data = {
"content": text,
"return_tokens": str(input_data.return_tokens).lower(),
"return_chunks": "true",
"max_chunk_length": str(input_data.max_chunk_length),
}
response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
result = response.json()
all_chunks.extend(result.get("chunks", []))
if input_data.return_tokens:
all_tokens.extend(result.get("tokens", []))
yield "chunks", all_chunks
if input_data.return_tokens:
yield "tokens", all_tokens
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel, ValidationError
from typing_extensions import TypedDict
from docarray import BaseDoc, DocList
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import (
create_doc,
create_doc_from_dict,
create_doc_from_typeddict,
)
from docarray.typing import AudioNdArray
from docarray.utils._internal.pydantic import is_pydantic_v2
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDoc):
text: str
images: DocList[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocList[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocList)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDoc)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, AudioDoc)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_create_doc_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_doc_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_doc_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDoc)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_doc_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, AudioDoc)
def test_create_doc_from_dict():
data_dict = {
'image': ImageDoc(tensor=np.random.rand(3, 224, 224)),
'text': TextDoc(text='hello'),
'id': 123,
}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=TextDoc(text='hey'),
id=111,
)
assert isinstance(doc, BaseDoc)
assert isinstance(doc.text, TextDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.id, int)
# Create a doc with an incorrect type
with pytest.raises(ValidationError):
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=['some', 'text'], # should be TextDoc
id=111,
)
# Handle empty data_dict
with pytest.raises(ValueError):
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict={})
# Data with a None value
data_dict = {'text': 'some text', 'other': None}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc1 = MyDoc(text='txt', other=10)
doc2 = MyDoc(text='txt', other='also text')
assert isinstance(doc1, BaseDoc) and isinstance(doc2, BaseDoc)
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel, ValidationError
from typing_extensions import TypedDict
from docarray import BaseDoc, DocList
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import (
create_doc,
create_doc_from_dict,
create_doc_from_typeddict,
)
from docarray.typing import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDoc):
text: str
images: DocList[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocList[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocList)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDoc)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, AudioDoc)
def test_create_doc_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_doc_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_doc_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDoc)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_doc_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, AudioDoc)
def test_create_doc_from_dict():
data_dict = {
'image': ImageDoc(tensor=np.random.rand(3, 224, 224)),
'text': TextDoc(text='hello'),
'id': 123,
}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=TextDoc(text='hey'),
id=111,
)
assert isinstance(doc, BaseDoc)
assert isinstance(doc.text, TextDoc)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.id, int)
# Create a doc with an incorrect type
with pytest.raises(ValidationError):
doc = MyDoc(
image=ImageDoc(tensor=np.random.rand(3, 224, 224)),
text=['some', 'text'], # should be TextDoc
id=111,
)
# Handle empty data_dict
with pytest.raises(ValueError):
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict={})
# Data with a None value
data_dict = {'text': 'some text', 'other': None}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
doc1 = MyDoc(text='txt', other=10)
doc2 = MyDoc(text='txt', other='also text')
assert isinstance(doc1, BaseDoc) and isinstance(doc2, BaseDoc)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, PatchMerging, Transformer,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerDecoder', 'Transformer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, PatchMerging, Transformer,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerDecoder', 'Transformer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid'
]
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
all_ids = docs[:, 'id']
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
for m_id in filter(lambda id: id not in missing_id, all_ids):
assert m_id in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
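    # `da[...]` yields a flattened view over the docs and their nested docs (e.g. matches);
    # deleting the 'embedding' column below clears embeddings across that whole view.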
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('weaviate', {'n_dim': 3, 'distance': 'l2-squared'}),
('annlite', {'n_dim': 3, 'metric': 'Euclidean'}),
('qdrant', {'n_dim': 3, 'distance': 'euclidean'}),
('elasticsearch', {'n_dim': 3, 'distance': 'l2_norm'}),
('sqlite', dict()),
('redis', {'n_dim': 3, 'distance': 'L2'}),
('milvus', {'n_dim': 3, 'distance': 'L2'}),
],
)
def test_del_subindex(storage, config, start_storage):
n_dim = 3
subindex_configs = (
{'@c': dict()} if storage in ['sqlite', 'memory'] else {'@c': {'n_dim': 2}}
)
da = DocumentArray(
storage=storage,
config=config,
subindex_configs=subindex_configs,
)
with da:
da.extend(
[
Document(
id=str(i),
embedding=i * np.ones(n_dim),
chunks=[
Document(id=str(i) + '_0', embedding=np.array([i, i])),
Document(id=str(i) + '_1', embedding=np.array([i, i])),
],
)
for i in range(10)
]
)
del da['0']
assert len(da) == 9
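    # each parent doc was created with 2 chunks, so 9 remaining parents -> 18 subindex docs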
assert len(da._subindices['@c']) == 18
del da[-2:]
assert len(da) == 7
assert len(da._subindices['@c']) == 14
def test_del_subindex_annlite_multimodal():
from docarray import dataclass
from docarray.typing import Text
@dataclass
class MMDoc:
my_text: Text
my_other_text: Text
n_dim = 3
da = DocumentArray(
storage='annlite',
config={'n_dim': n_dim, 'metric': 'Euclidean'},
subindex_configs={'@.[my_text, my_other_text]': {'n_dim': 2}},
)
num_docs = 10
docs_to_add = DocumentArray(
[
Document(MMDoc(my_text='hello', my_other_text='world'))
for _ in range(num_docs)
]
)
for i, d in enumerate(docs_to_add):
d.id = str(i)
d.embedding = i * np.ones(n_dim)
d.my_text.id = str(i) + '_0'
d.my_text.embedding = [i, i]
d.my_other_text.id = str(i) + '_1'
d.my_other_text.embedding = [i, i]
with da:
da.extend(docs_to_add)
del da['0']
assert len(da) == 9
assert len(da._subindices['@.[my_text, my_other_text]']) == 18
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
all_ids = docs[:, 'id']
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
for m_id in filter(lambda id: id not in missing_id, all_ids):
assert m_id in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('weaviate', {'n_dim': 3, 'distance': 'l2-squared'}),
('annlite', {'n_dim': 3, 'metric': 'Euclidean'}),
('qdrant', {'n_dim': 3, 'distance': 'euclidean'}),
('elasticsearch', {'n_dim': 3, 'distance': 'l2_norm'}),
('sqlite', dict()),
('redis', {'n_dim': 3, 'distance': 'L2'}),
],
)
def test_del_subindex(storage, config):
n_dim = 3
subindex_configs = (
{'@c': dict()} if storage in ['sqlite', 'memory'] else {'@c': {'n_dim': 2}}
)
da = DocumentArray(
storage=storage,
config=config,
subindex_configs=subindex_configs,
)
with da:
da.extend(
[
Document(
id=str(i),
embedding=i * np.ones(n_dim),
chunks=[
Document(id=str(i) + '_0', embedding=np.array([i, i])),
Document(id=str(i) + '_1', embedding=np.array([i, i])),
],
)
for i in range(10)
]
)
del da['0']
assert len(da) == 9
assert len(da._subindices['@c']) == 18
del da[-2:]
assert len(da) == 7
assert len(da._subindices['@c']) == 14
def test_del_subindex_annlite_multimodal():
from docarray import dataclass
from docarray.typing import Text
@dataclass
class MMDoc:
my_text: Text
my_other_text: Text
n_dim = 3
da = DocumentArray(
storage='annlite',
config={'n_dim': n_dim, 'metric': 'Euclidean'},
subindex_configs={'@.[my_text, my_other_text]': {'n_dim': 2}},
)
num_docs = 10
docs_to_add = DocumentArray(
[
Document(MMDoc(my_text='hello', my_other_text='world'))
for _ in range(num_docs)
]
)
for i, d in enumerate(docs_to_add):
d.id = str(i)
d.embedding = i * np.ones(n_dim)
d.my_text.id = str(i) + '_0'
d.my_text.embedding = [i, i]
d.my_other_text.id = str(i) + '_1'
d.my_other_text.embedding = [i, i]
with da:
da.extend(docs_to_add)
del da['0']
assert len(da) == 9
assert len(da._subindices['@.[my_text, my_other_text]']) == 18
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json")) as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
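# Minimal usage sketch for the LSTM module above (illustrative only: the
# dimensions are arbitrary, and the feature-dict keys follow forward() above).
if __name__ == "__main__":
    lstm = LSTM(word_embedding_dimension=300, hidden_dim=128)
    features = {
        "token_embeddings": torch.randn(2, 10, 300),  # (batch, seq_len, emb_dim)
        "sentence_lengths": torch.tensor([10, 7]),  # true token count per sentence
    }
    out = lstm(features)
    # bidirectional=True doubles the output size: 2 * hidden_dim
    assert out["token_embeddings"].shape[-1] == lstm.get_word_embedding_dimension()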
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json")) as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
from typing import Any, Dict, Optional, Union
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
# These are the shortened model names
# Any model that contains one of these names will not support function calling
MODELS_WITHOUT_FUNCTION_CALLING_SUPPORT = [
"gemini-2.0-flash-thinking",
"gemini-2.0-flash-lite",
]
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
    text: Optional[str] = None,
    delta: Optional[str] = None,
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(
text=text if text is not None else response.text,
delta=delta if delta is not None else response.text,
raw=raw,
)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
try:
# When the response contains only a function call, the library
# raises an exception.
        # The easiest way to detect this is to try to access the text attribute and
# catch the exception.
# https://github.com/google-gemini/generative-ai-python/issues/670
text = response.text
except (ValueError, AttributeError):
text = None
additional_kwargs: Dict[str, Any] = {}
for part in response.parts:
if fn := part.function_call:
if "tool_calls" not in additional_kwargs:
additional_kwargs["tool_calls"] = []
additional_kwargs["tool_calls"].append(fn)
return ChatResponse(
message=ChatMessage(
role=role, content=text, additional_kwargs=additional_kwargs
),
raw=raw,
additional_kwargs=additional_kwargs,
)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
for block in message.blocks:
if isinstance(block, TextBlock):
if block.text:
parts.append({"text": block.text})
elif isinstance(block, ImageBlock):
base64_bytes = block.resolve_image(as_base64=False).read()
parts.append(
{
"mime_type": block.image_mimetype,
"data": base64_bytes,
}
)
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
for tool_call in message.additional_kwargs.get("tool_calls", []):
parts.append(tool_call)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
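# Example of the ContentDict returned by chat_message_to_gemini (illustrative
# values only; the exact role string depends on ROLES_TO_GEMINI):
#   {"role": "user", "parts": [{"text": "hi"}, {"mime_type": "image/png", "data": b"..."}]}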
def is_function_calling_model(model: str) -> bool:
for model_name in MODELS_WITHOUT_FUNCTION_CALLING_SUPPORT:
if model_name in model:
return False
return True
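# Illustrative sanity check of the substring gating above (the model names are
# examples only, not an authoritative list of supported models):
if __name__ == "__main__":
    assert is_function_calling_model("models/gemini-1.5-pro")
    assert not is_function_calling_model("models/gemini-2.0-flash-lite-001")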
|
from typing import Any, Dict, Optional, Union
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
# These are the shortened model names
# Any model that contains one of these names will not support function calling
MODELS_WITHOUT_FUNCTION_CALLING_SUPPORT = [
"gemini-2.0-flash-thinking",
"gemini-2.0-flash-lite",
]
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
    text: Optional[str] = None,
    delta: Optional[str] = None,
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(
text=text if text is not None else response.text,
delta=delta if delta is not None else response.text,
raw=raw,
)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
try:
# When the response contains only a function call, the library
# raises an exception.
        # The easiest way to detect this is to try to access the text attribute and
# catch the exception.
# https://github.com/google-gemini/generative-ai-python/issues/670
text = response.text
except (ValueError, AttributeError):
text = None
additional_kwargs: Dict[str, Any] = {}
for part in response.parts:
if fn := part.function_call:
if "tool_calls" not in additional_kwargs:
additional_kwargs["tool_calls"] = []
additional_kwargs["tool_calls"].append(fn)
return ChatResponse(
message=ChatMessage(
role=role, content=text, additional_kwargs=additional_kwargs
),
raw=raw,
additional_kwargs=additional_kwargs,
)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
for block in message.blocks:
if isinstance(block, TextBlock):
if block.text:
parts.append({"text": block.text})
elif isinstance(block, ImageBlock):
base64_bytes = block.resolve_image(as_base64=False).read()
parts.append(
{
"mime_type": block.image_mimetype,
"data": base64_bytes,
}
)
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
for tool_call in message.additional_kwargs.get("tool_calls", []):
parts.append(tool_call)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
def is_function_calling_model(model: str) -> bool:
for model_name in MODELS_WITHOUT_FUNCTION_CALLING_SUPPORT:
if model_name in model:
return False
return True
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
logger = logging.get_logger(__name__)
class SpeechEncoderDecoderConfig(PretrainedConfig):
r"""
[`SpeechEncoderDecoderConfig`] is the configuration class to store the configuration of a
[`SpeechEncoderDecoderModel`]. It is used to instantiate an Encoder Decoder model according to the specified
arguments, defining the encoder and decoder configs.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
kwargs (*optional*):
Dictionary of keyword arguments. Notably:
- **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the encoder config.
- **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the decoder config.
Examples:
```python
>>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel
>>> # Initializing a Wav2Vec2 & BERT style configuration
>>> config_encoder = Wav2Vec2Config()
>>> config_decoder = BertConfig()
>>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> # Initializing a Wav2Vec2Bert model from a Wav2Vec2 & google-bert/bert-base-uncased style configurations
>>> model = SpeechEncoderDecoderModel(config=config)
>>> # Accessing the model configuration
>>> config_encoder = model.config.encoder
>>> config_decoder = model.config.decoder
>>> # set decoder config to causal lm
>>> config_decoder.is_decoder = True
>>> config_decoder.add_cross_attention = True
>>> # Saving the model, including its configuration
>>> model.save_pretrained("my-model")
>>> # loading model and config from pretrained folder
>>> encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained("my-model")
>>> model = SpeechEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
```"""
model_type = "speech-encoder-decoder"
sub_configs = {"encoder": AutoConfig, "decoder": AutoConfig}
has_no_defaults_at_init = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuration of type {self.model_type} cannot be instantiated because not both `encoder` and"
f" `decoder` sub-configurations are passed, but only {kwargs}"
)
encoder_config = kwargs.pop("encoder")
encoder_model_type = encoder_config.pop("model_type")
decoder_config = kwargs.pop("decoder")
decoder_model_type = decoder_config.pop("model_type")
self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
self.is_encoder_decoder = True
@classmethod
def from_encoder_decoder_configs(
cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
) -> PretrainedConfig:
r"""
Instantiate a [`SpeechEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model
configuration and decoder model configuration.
Returns:
[`SpeechEncoderDecoderConfig`]: An instance of a configuration object
"""
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
__all__ = ["SpeechEncoderDecoderConfig"]
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
logger = logging.get_logger(__name__)
class SpeechEncoderDecoderConfig(PretrainedConfig):
r"""
[`SpeechEncoderDecoderConfig`] is the configuration class to store the configuration of a
[`SpeechEncoderDecoderModel`]. It is used to instantiate an Encoder Decoder model according to the specified
arguments, defining the encoder and decoder configs.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
kwargs (*optional*):
Dictionary of keyword arguments. Notably:
- **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the encoder config.
- **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the decoder config.
Examples:
```python
>>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel
>>> # Initializing a Wav2Vec2 & BERT style configuration
>>> config_encoder = Wav2Vec2Config()
>>> config_decoder = BertConfig()
>>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> # Initializing a Wav2Vec2Bert model from a Wav2Vec2 & google-bert/bert-base-uncased style configurations
>>> model = SpeechEncoderDecoderModel(config=config)
>>> # Accessing the model configuration
>>> config_encoder = model.config.encoder
>>> config_decoder = model.config.decoder
>>> # set decoder config to causal lm
>>> config_decoder.is_decoder = True
>>> config_decoder.add_cross_attention = True
>>> # Saving the model, including its configuration
>>> model.save_pretrained("my-model")
>>> # loading model and config from pretrained folder
>>> encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained("my-model")
>>> model = SpeechEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
```"""
model_type = "speech-encoder-decoder"
sub_configs = {"encoder": AutoConfig, "decoder": AutoConfig}
has_no_defaults_at_init = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuraton of type {self.model_type} cannot be instantiated because not both `encoder` and"
f" `decoder` sub-configurations are passed, but only {kwargs}"
)
encoder_config = kwargs.pop("encoder")
encoder_model_type = encoder_config.pop("model_type")
decoder_config = kwargs.pop("decoder")
decoder_model_type = decoder_config.pop("model_type")
self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
self.is_encoder_decoder = True
@classmethod
def from_encoder_decoder_configs(
cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
) -> PretrainedConfig:
r"""
Instantiate a [`SpeechEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model
configuration and decoder model configuration.
Returns:
[`SpeechEncoderDecoderConfig`]: An instance of a configuration object
"""
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
__all__ = ["SpeechEncoderDecoderConfig"]
|
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_deployment(protocol, include_gateway):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
cors=True,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 0
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
cors=True,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 0
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor1(Executor):
@requests(on='/invalid')
async def invalid(self, doc: Document, **kwargs):
return doc
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor2(Executor):
@requests(on='/invalid')
def invalid(self, doc: Document, **kwargs):
return doc
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
class Executor1(Executor):
@requests
def generator(self, **kwargs):
yield Document(text='new document')
@requests(on='/non_generator')
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
class Executor2(Executor):
@requests
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
@requests(on='/generator')
def generator(self, **kwargs):
yield Document(text='new document')
class Executor3(Executor):
@requests(on='/non_generator')
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
@requests(on='/generator')
def generator(self, **kwargs):
yield Document(text='new document')
@pytest.mark.asyncio
@pytest.mark.parametrize(
'executor,expected',
[
('Executor1', {'/default': True, '/non_generator': False}),
('Executor2', {'/default': False, '/generator': True}),
('Executor3', {'/generator': True, '/non_generator': False}),
],
)
async def test_endpoint_discovery(executor, expected):
from google.protobuf import json_format
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
args = set_pod_parser().parse_args(['--uses', executor])
handler = WorkerRequestHandler(args, JinaLogger('data request handler'))
res = await handler.endpoint_discovery(None, None)
for endpoint, is_generator in expected.items():
assert (
json_format.MessageToDict(res.schemas)[endpoint]['is_generator']
== is_generator
)
|
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
async def test_streaming_deployment(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
cors=True,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 0
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
cors=True,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 0
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor1(Executor):
@requests(on='/invalid')
async def invalid(self, doc: Document, **kwargs):
return doc
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor2(Executor):
@requests(on='/invalid')
def invalid(self, doc: Document, **kwargs):
return doc
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
class Executor1(Executor):
@requests
def generator(self, **kwargs):
yield Document(text='new document')
@requests(on='/non_generator')
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
class Executor2(Executor):
@requests
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
@requests(on='/generator')
def generator(self, **kwargs):
yield Document(text='new document')
class Executor3(Executor):
@requests(on='/non_generator')
def non_generator(self, docs: DocumentArray, **kwargs):
return docs
@requests(on='/generator')
def generator(self, **kwargs):
yield Document(text='new document')
@pytest.mark.asyncio
@pytest.mark.parametrize(
'executor,expected',
[
('Executor1', {'/default': True, '/non_generator': False}),
('Executor2', {'/default': False, '/generator': True}),
('Executor3', {'/generator': True, '/non_generator': False}),
],
)
async def test_endpoint_discovery(executor, expected):
from google.protobuf import json_format
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
args = set_pod_parser().parse_args(['--uses', executor])
handler = WorkerRequestHandler(args, JinaLogger('data request handler'))
res = await handler.endpoint_discovery(None, None)
for endpoint, is_generator in expected.items():
assert (
json_format.MessageToDict(res.schemas)[endpoint]['is_generator']
== is_generator
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray, Flow, requests
from jina.executors import BaseExecutor
from match_merger import MatchMerger
class MockShard(BaseExecutor):
@requests
def search(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.matches.append(Document(tags={'shard_id': self.runtime_args.pea_id}))
@pytest.fixture
def docs():
return [Document(text=f'sample text {i}') for i in range(2)]
@pytest.mark.parametrize('shards', (1, 3, 5))
def test_match_merger(docs, shards):
with Flow().add(
uses=MockShard, uses_after=MatchMerger, shards=shards, polling='all'
) as f:
documents = f.search(docs, return_results=True)[0].docs
assert len(documents) == 2
for doc in documents:
assert {d.tags['shard_id'] for d in doc.matches} == {
float(i) for i in range(shards)
}
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray, Flow, requests
from jina.executors import BaseExecutor
from ...match_merger import MatchMerger
class MockShard(BaseExecutor):
@requests
def search(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.matches.append(Document(tags={'shard_id': self.runtime_args.pea_id}))
@pytest.fixture
def docs():
return [Document(text=f'sample text {i}') for i in range(2)]
@pytest.mark.parametrize('shards', (1, 3, 5))
def test_match_merger(docs, shards):
with Flow().add(
uses=MockShard, uses_after=MatchMerger, shards=shards, polling='all'
) as f:
documents = f.search(docs, return_results=True)[0].docs
assert len(documents) == 2
for doc in documents:
assert {d.tags['shard_id'] for d in doc.matches} == {
float(i) for i in range(shards)
}
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BitNetHfQuantizer(HfQuantizer):
"""
    1.58-bit quantization from the BitNet quantization method:
    it converts the linear layers into BitLinear layers during loading.
Check out the paper introducing this method: https://huggingface.co/papers/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
elif device_map is not None:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BitNetHfQuantizer(HfQuantizer):
"""
    1.58-bit quantization from the BitNet quantization method:
    it converts the linear layers into BitLinear layers during loading.
    Check out the paper introducing this method: https://arxiv.org/pdf/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
elif device_map is not None:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[common]"',
'lz4': '"docarray[common]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
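# Illustrative usage of import_library (the package name is only an example):
# with raise_error=False the helper returns None instead of raising, so callers
# can feature-gate optional dependencies, e.g.:
#     pandas = import_library('pandas', raise_error=False)
#     if pandas is None:
#         ...  # optional dependency missing; fall back to another code path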
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
    return False  # reached when item has no dtype/ndim attributes (e.g. a plain Python int)
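# Illustrative behaviour of is_np_int (example values, not from the source):
#   is_np_int(np.int64(3))       -> True   (0-d numpy integer scalar)
#   is_np_int(np.array([1, 2]))  -> False  (ndim > 0)
#   is_np_int(3)                 -> False  (plain Python int has no dtype/ndim)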
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
from typing import Any
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_torch_available():
return torch_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
    return False  # reached when item has no dtype/ndim attributes (e.g. a plain Python int)
def is_tf_available():
return tf_imported
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
from datetime import datetime, timezone
from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket
from backend.data.execution import ExecutionResult, ExecutionStatus
from backend.server.conn_manager import ConnectionManager
from backend.server.model import Methods, WsMessage
@pytest.fixture
def connection_manager() -> ConnectionManager:
return ConnectionManager()
@pytest.fixture
def mock_websocket() -> AsyncMock:
websocket: AsyncMock = AsyncMock(spec=WebSocket)
websocket.send_text = AsyncMock()
return websocket
@pytest.mark.asyncio
async def test_connect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.connect(mock_websocket)
assert mock_websocket in connection_manager.active_connections
mock_websocket.accept.assert_called_once()
def test_disconnect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.active_connections.add(mock_websocket)
connection_manager.subscriptions["test_graph_1"] = {mock_websocket}
connection_manager.disconnect(mock_websocket)
assert mock_websocket not in connection_manager.active_connections
assert mock_websocket not in connection_manager.subscriptions["test_graph_1"]
@pytest.mark.asyncio
async def test_subscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.subscribe("test_graph", 1, mock_websocket)
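    # the subscription channel key combines the graph id and version: "test_graph_1"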
assert mock_websocket in connection_manager.subscriptions["test_graph_1"]
@pytest.mark.asyncio
async def test_unsubscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph_1"] = {mock_websocket}
await connection_manager.unsubscribe("test_graph", 1, mock_websocket)
assert "test_graph" not in connection_manager.subscriptions
@pytest.mark.asyncio
async def test_send_execution_result(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph_1"] = {mock_websocket}
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(tz=timezone.utc),
queue_time=None,
start_time=datetime.now(tz=timezone.utc),
end_time=datetime.now(tz=timezone.utc),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_called_once_with(
WsMessage(
method=Methods.EXECUTION_EVENT,
channel="test_graph_1",
data=result.model_dump(),
).model_dump_json()
)
@pytest.mark.asyncio
async def test_send_execution_result_no_subscribers(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(),
queue_time=None,
start_time=datetime.now(),
end_time=datetime.now(),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_not_called()
|
from datetime import datetime, timezone
from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket
from backend.data.execution import ExecutionResult, ExecutionStatus
from backend.server.conn_manager import ConnectionManager
from backend.server.model import Methods, WsMessage
@pytest.fixture
def connection_manager() -> ConnectionManager:
return ConnectionManager()
@pytest.fixture
def mock_websocket() -> AsyncMock:
websocket: AsyncMock = AsyncMock(spec=WebSocket)
websocket.send_text = AsyncMock()
return websocket
@pytest.mark.asyncio
async def test_connect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.connect(mock_websocket)
assert mock_websocket in connection_manager.active_connections
mock_websocket.accept.assert_called_once()
def test_disconnect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.active_connections.add(mock_websocket)
connection_manager.subscriptions["test_graph"] = {mock_websocket}
connection_manager.disconnect(mock_websocket)
assert mock_websocket not in connection_manager.active_connections
assert mock_websocket not in connection_manager.subscriptions["test_graph"]
@pytest.mark.asyncio
async def test_subscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.subscribe("test_graph", mock_websocket)
assert mock_websocket in connection_manager.subscriptions["test_graph"]
@pytest.mark.asyncio
async def test_unsubscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph"] = {mock_websocket}
await connection_manager.unsubscribe("test_graph", mock_websocket)
assert "test_graph" not in connection_manager.subscriptions
@pytest.mark.asyncio
async def test_send_execution_result(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph"] = {mock_websocket}
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(tz=timezone.utc),
queue_time=None,
start_time=datetime.now(tz=timezone.utc),
end_time=datetime.now(tz=timezone.utc),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_called_once_with(
WsMessage(
method=Methods.EXECUTION_EVENT,
channel="test_graph",
data=result.model_dump(),
).model_dump_json()
)
@pytest.mark.asyncio
async def test_send_execution_result_no_subscribers(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(),
queue_time=None,
start_time=datetime.now(),
end_time=datetime.now(),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_not_called()
|
from unittest.mock import AsyncMock, patch
import responses
from langchain_community.tools.you import YouSearchTool
from langchain_community.utilities.you import YouSearchAPIWrapper
from ..utilities.test_you import (
LIMITED_PARSED_OUTPUT,
MOCK_PARSED_OUTPUT,
MOCK_RESPONSE_RAW,
NEWS_RESPONSE_PARSED,
NEWS_RESPONSE_RAW,
TEST_ENDPOINT,
)
class TestYouSearchTool:
@responses.activate
def test_invoke(self) -> None:
responses.add(
responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
)
query = "Test query text"
you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test"))
results = you_tool.invoke(query)
expected_result = MOCK_PARSED_OUTPUT
assert results == expected_result
@responses.activate
def test_invoke_max_docs(self) -> None:
responses.add(
responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
)
query = "Test query text"
you_tool = YouSearchTool(
api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", k=2)
)
results = you_tool.invoke(query)
expected_result = [MOCK_PARSED_OUTPUT[0], MOCK_PARSED_OUTPUT[1]]
assert results == expected_result
@responses.activate
def test_invoke_limit_snippets(self) -> None:
responses.add(
responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
)
query = "Test query text"
you_tool = YouSearchTool(
api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", n_snippets_per_hit=1)
)
results = you_tool.invoke(query)
expected_result = LIMITED_PARSED_OUTPUT
assert results == expected_result
@responses.activate
def test_invoke_news(self) -> None:
responses.add(
responses.GET, f"{TEST_ENDPOINT}/news", json=NEWS_RESPONSE_RAW, status=200
)
query = "Test news text"
you_tool = YouSearchTool(
api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", endpoint_type="news")
)
results = you_tool.invoke(query)
expected_result = NEWS_RESPONSE_PARSED
assert results == expected_result
async def test_ainvoke(self) -> None:
you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test"))
# Mock response object to simulate aiohttp response
mock_response = AsyncMock()
mock_response.__aenter__.return_value = (
mock_response # Make the context manager return itself
)
mock_response.__aexit__.return_value = None # No value needed for exit
mock_response.status = 200
mock_response.json = AsyncMock(return_value=MOCK_RESPONSE_RAW)
# Patch the aiohttp.ClientSession object
with patch("aiohttp.ClientSession.get", return_value=mock_response):
results = await you_tool.ainvoke("test query")
assert results == MOCK_PARSED_OUTPUT
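# Hedged usage sketch (not part of the test suite above): the API key is a
# placeholder and a real call would hit the live you.com API rather than the
# mocked endpoint used in the tests.
def _example_you_search_usage() -> None:
    wrapper = YouSearchAPIWrapper(ydc_api_key="YOUR_YDC_API_KEY", k=3)
    tool = YouSearchTool(api_wrapper=wrapper)
    docs = tool.invoke("What is the capital of France?")
    for doc in docs:
        print(doc.page_content[:80])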
|
from unittest.mock import AsyncMock, patch
import responses
from langchain_community.tools.you import YouSearchTool
from langchain_community.utilities.you import YouSearchAPIWrapper
from ..utilities.test_you import (
LIMITED_PARSED_OUTPUT,
MOCK_PARSED_OUTPUT,
MOCK_RESPONSE_RAW,
NEWS_RESPONSE_PARSED,
NEWS_RESPONSE_RAW,
TEST_ENDPOINT,
)
class TestYouSearchTool:
@responses.activate
def test_invoke(self) -> None:
responses.add(
responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
)
query = "Test query text"
you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test")) # type: ignore[call-arg]
results = you_tool.invoke(query)
expected_result = MOCK_PARSED_OUTPUT
assert results == expected_result
@responses.activate
def test_invoke_max_docs(self) -> None:
responses.add(
responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
)
query = "Test query text"
you_tool = YouSearchTool( # type: ignore[call-arg]
api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", k=2)
)
results = you_tool.invoke(query)
expected_result = [MOCK_PARSED_OUTPUT[0], MOCK_PARSED_OUTPUT[1]]
assert results == expected_result
@responses.activate
def test_invoke_limit_snippets(self) -> None:
responses.add(
responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
)
query = "Test query text"
you_tool = YouSearchTool( # type: ignore[call-arg]
api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", n_snippets_per_hit=1)
)
results = you_tool.invoke(query)
expected_result = LIMITED_PARSED_OUTPUT
assert results == expected_result
@responses.activate
def test_invoke_news(self) -> None:
responses.add(
responses.GET, f"{TEST_ENDPOINT}/news", json=NEWS_RESPONSE_RAW, status=200
)
query = "Test news text"
you_tool = YouSearchTool( # type: ignore[call-arg]
api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", endpoint_type="news")
)
results = you_tool.invoke(query)
expected_result = NEWS_RESPONSE_PARSED
assert results == expected_result
async def test_ainvoke(self) -> None:
you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test")) # type: ignore[call-arg]
# Mock response object to simulate aiohttp response
mock_response = AsyncMock()
mock_response.__aenter__.return_value = (
mock_response # Make the context manager return itself
)
mock_response.__aexit__.return_value = None # No value needed for exit
mock_response.status = 200
mock_response.json = AsyncMock(return_value=MOCK_RESPONSE_RAW)
# Patch the aiohttp.ClientSession object
with patch("aiohttp.ClientSession.get", return_value=mock_response):
results = await you_tool.ainvoke("test query")
assert results == MOCK_PARSED_OUTPUT
|
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s"):
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
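# Hedged usage sketch for the helpers above (logger names and messages are
# placeholders). LoggingHandler routes records through tqdm so progress bars
# are not broken by log output; install_logger configures colored output and
# adds the NOTICE level.
if __name__ == "__main__":
    example_logger = logging.getLogger("example")
    install_logger(example_logger, level=logging.INFO)
    example_logger.info("training started")
    example_logger.notice("checkpoint saved")  # NOTICE level added by install_logger

    tqdm_logger = logging.getLogger("progress")
    tqdm_logger.addHandler(LoggingHandler())  # tqdm-friendly alternative handler
    tqdm_logger.warning("this line plays nicely with tqdm progress bars")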
|
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
        except Exception:
self.handleError(record)
def install_logger(
given_logger, level = logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s"
):
""" Configures the given logger; format, logging level, style, etc """
import coloredlogs
def add_notice_log_level():
""" Creates a new 'notice' logging level """
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
|
import numpy as np
import pytest
from pydantic import Field
from typing import Optional
from docarray import BaseDoc, DocList
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.typing import NdArray
class SchemaDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10]
@pytest.fixture
def docs():
docs = DocList[SchemaDoc](
[
SchemaDoc(text=f'hello {i}', price=i, tensor=np.array([i] * 10))
for i in range(9)
]
)
docs.append(SchemaDoc(text='good bye', price=100, tensor=np.array([100.0] * 10)))
return docs
def test_indexing(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
assert doc_index.num_docs() == 0
doc_index.index(docs)
assert doc_index.num_docs() == 10
@pytest.fixture
def doc_index(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
doc_index.index(docs)
return doc_index
def test_del_item(docs, doc_index):
to_remove = [docs[0].id, docs[1].id]
doc_index._del_items(to_remove)
assert doc_index.num_docs() == 8
def test_del(docs, doc_index):
del doc_index[docs[0].id]
assert doc_index.num_docs() == 9
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = MyDoc(text='query', price=0, tensor=np.ones(10))
else:
query = np.ones(10)
docs, scores = doc_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find_batched(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = DocList[MyDoc](
[
MyDoc(text='query 0', price=0, tensor=np.zeros(10)),
MyDoc(text='query 1', price=1, tensor=np.ones(10)),
]
)
else:
query = np.ones((2, 10))
docs, scores = doc_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 2
for result in docs:
assert len(result) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
def test_concatenated_queries(doc_index):
query = SchemaDoc(text='query', price=0, tensor=np.ones(10))
q = (
doc_index.build_query()
.find(query=query, search_field='tensor', limit=5)
.filter(filter_query={'price': {'$neq': 5}})
.build()
)
docs, scores = doc_index.execute_query(q)
assert len(docs) == 4
def test_save_and_load(doc_index, tmpdir):
initial_num_docs = doc_index.num_docs()
binary_file = str(tmpdir / 'docs.bin')
doc_index.persist(binary_file)
new_doc_index = InMemoryExactNNIndex[SchemaDoc](index_file_path=binary_file)
docs, scores = new_doc_index.find(np.ones(10), search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert new_doc_index.num_docs() == initial_num_docs
newer_doc_index = InMemoryExactNNIndex[SchemaDoc](
index_file_path='some_nonexistent_file.bin'
)
assert newer_doc_index.num_docs() == 0
def test_index_with_None_embedding():
class DocTest(BaseDoc):
index: int
embedding: Optional[NdArray[4]]
# Some of the documents have the embedding field set to None
dl = DocList[DocTest](
[
DocTest(index=i, embedding=np.random.rand(4) if i % 2 else None)
for i in range(100)
]
)
index = InMemoryExactNNIndex[DocTest](dl)
res = index.find(np.random.rand(4), search_field="embedding", limit=70)
assert len(res.documents) == 50
for doc in res.documents:
assert doc.index % 2 != 0
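# Hedged usage sketch mirroring the tests above: index some SchemaDoc objects,
# run a plain vector query, and run a concatenated find+filter query.
# All values are illustrative only.
def _example_in_memory_index_usage():
    example_docs = DocList[SchemaDoc](
        [
            SchemaDoc(text=f'doc {i}', price=i, tensor=np.random.rand(10))
            for i in range(20)
        ]
    )
    index = InMemoryExactNNIndex[SchemaDoc]()
    index.index(example_docs)

    matches, scores = index.find(np.random.rand(10), search_field='tensor', limit=3)
    print([m.text for m in matches], scores)

    query_doc = SchemaDoc(text='query', price=0, tensor=np.ones(10))
    q = (
        index.build_query()
        .find(query=query_doc, search_field='tensor', limit=5)
        .filter(filter_query={'price': {'$neq': 5}})
        .build()
    )
    filtered, _ = index.execute_query(q)
    print([m.text for m in filtered])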
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc, DocList
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.typing import NdArray
class SchemaDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10]
@pytest.fixture
def docs():
docs = DocList[SchemaDoc](
[
SchemaDoc(text=f'hello {i}', price=i, tensor=np.array([i] * 10))
for i in range(9)
]
)
docs.append(SchemaDoc(text='good bye', price=100, tensor=np.array([100.0] * 10)))
return docs
def test_indexing(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
assert doc_index.num_docs() == 0
doc_index.index(docs)
assert doc_index.num_docs() == 10
@pytest.fixture
def doc_index(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
doc_index.index(docs)
return doc_index
def test_del_item(docs, doc_index):
to_remove = [docs[0].id, docs[1].id]
doc_index._del_items(to_remove)
assert doc_index.num_docs() == 8
def test_del(docs, doc_index):
del doc_index[docs[0].id]
assert doc_index.num_docs() == 9
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = MyDoc(text='query', price=0, tensor=np.ones(10))
else:
query = np.ones(10)
docs, scores = doc_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find_batched(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = DocList[MyDoc](
[
MyDoc(text='query 0', price=0, tensor=np.zeros(10)),
MyDoc(text='query 1', price=1, tensor=np.ones(10)),
]
)
else:
query = np.ones((2, 10))
docs, scores = doc_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 2
for result in docs:
assert len(result) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
def test_concatenated_queries(doc_index):
query = SchemaDoc(text='query', price=0, tensor=np.ones(10))
q = (
doc_index.build_query()
.find(query=query, search_field='tensor', limit=5)
.filter(filter_query={'price': {'$neq': 5}})
.build()
)
docs, scores = doc_index.execute_query(q)
assert len(docs) == 4
def test_save_and_load(doc_index, tmpdir):
initial_num_docs = doc_index.num_docs()
binary_file = str(tmpdir / 'docs.bin')
doc_index.persist(binary_file)
new_doc_index = InMemoryExactNNIndex[SchemaDoc](index_file_path=binary_file)
docs, scores = new_doc_index.find(np.ones(10), search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert new_doc_index.num_docs() == initial_num_docs
newer_doc_index = InMemoryExactNNIndex[SchemaDoc](
index_file_path='some_nonexistent_file.bin'
)
assert newer_doc_index.num_docs() == 0
|
# coding=utf-8
# Copyright 2021, The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast tokenization class for BlenderbotSmall."""
from typing import Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).
Args:
vocab_file (`str`):
Path to the vocabulary file.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__(
self,
vocab_file=None,
merges_file=None,
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
add_prefix_space=False,
trim_offsets=True,
**kwargs,
):
super().__init__(
ByteLevelBPETokenizer(
vocab=vocab_file,
merges=merges_file,
add_prefix_space=add_prefix_space,
trim_offsets=trim_offsets,
),
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
**kwargs,
)
self.add_prefix_space = add_prefix_space
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall
does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
__all__ = ["BlenderbotSmallTokenizerFast"]
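# Hedged illustration (standalone, with made-up token ids) of the layouts
# produced by build_inputs_with_special_tokens above.
def _example_special_token_layout():
    bos_token_id, eos_token_id = 1, 2
    ids_0, ids_1 = [10, 11], [20, 21]
    single = [bos_token_id] + ids_0 + [eos_token_id]
    pair = single + [eos_token_id] + ids_1 + [eos_token_id]
    assert single == [1, 10, 11, 2]
    assert pair == [1, 10, 11, 2, 2, 20, 21, 2]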
|
# coding=utf-8
# Copyright 2021, The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast tokenization class for BlenderbotSmall."""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).
Args:
vocab_file (`str`):
Path to the vocabulary file.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__(
self,
vocab_file=None,
merges_file=None,
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
add_prefix_space=False,
trim_offsets=True,
**kwargs,
):
super().__init__(
ByteLevelBPETokenizer(
vocab=vocab_file,
merges=merges_file,
add_prefix_space=add_prefix_space,
trim_offsets=trim_offsets,
),
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
**kwargs,
)
self.add_prefix_space = add_prefix_space
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall
does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
__all__ = ["BlenderbotSmallTokenizerFast"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
import pytest
from mmdet.datasets import CocoDataset
def _create_ids_error_coco_json(json_name):
image = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0,
}
annotation_2 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0,
}
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
}]
fake_json = {
'images': [image],
'annotations': [annotation_1, annotation_2],
'categories': categories
}
mmcv.dump(fake_json, json_name)
def test_coco_annotation_ids_unique():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_ids_error_coco_json(fake_json_file)
# test annotation ids not unique error
with pytest.raises(AssertionError):
CocoDataset(ann_file=fake_json_file, classes=('car', ), pipeline=[])
|
import os.path as osp
import tempfile
import mmcv
import pytest
from mmdet.datasets import CocoDataset
def _create_ids_error_coco_json(json_name):
image = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0,
}
annotation_2 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0,
}
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
}]
fake_json = {
'images': [image],
'annotations': [annotation_1, annotation_2],
'categories': categories
}
mmcv.dump(fake_json, json_name)
def test_coco_annotation_ids_unique():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_ids_error_coco_json(fake_json_file)
# test annotation ids not unique error
with pytest.raises(AssertionError):
CocoDataset(ann_file=fake_json_file, classes=('car', ), pipeline=[])
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
# set the calendar to month 2 but use current time from now
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
# set the calendar to month 2 but use current time from now
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month2)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month1)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month2)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.document.io.json import orjson_dumps
from docarray.document.mixins import ProtoMixin
from docarray.typing import ID
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: ID.validate(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested python class defined in the schema. Could be useful for
        reconstruction of a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
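# Hedged illustration of _get_nested_document_class above: with a nested schema,
# the parent class can recover the nested document type from its field name.
# The _Inner/_Outer classes are hypothetical and exist only for this example.
def _example_nested_class_lookup() -> None:
    class _Inner(BaseDocument):
        name: str = ''

    class _Outer(BaseDocument):
        inner: _Inner

    assert _Outer._get_nested_document_class('inner') is _Inner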
|
import os
from typing import Type
from pydantic import BaseModel, Field
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.typing import ID
from .mixins import ProtoMixin
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: ID.validate(os.urandom(16).hex()))
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested python class defined in the schema. Could be useful for
        reconstruction of a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict
import numpy as np
from jina import DocumentArray, Document, Executor
from ...image_tf_encoder import ImageTFEncoder
input_dim = 336
target_output_dim = 1280
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_name == 'MobileNetV2'
def test_encoding_results():
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = ImageTFEncoder()
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
def test_image_results(test_images: Dict[str, np.ndarray]):
embeddings = {}
encoder = ImageTFEncoder()
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (target_output_dim,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from typing import Dict
import numpy as np
from jina import DocumentArray, Document
from jina.executors import BaseExecutor
directory = os.path.dirname(os.path.realpath(__file__))
input_dim = 336
target_output_dim = 1280
def test_encoding_results():
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = BaseExecutor.load_config(
os.path.join(directory, '../../config.yml'))
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
def test_image_results(test_images: Dict[str, np.ndarray]):
embeddings = {}
encoder = BaseExecutor.load_config(
os.path.join(directory, '../../config.yml'))
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (target_output_dim,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
|
_base_ = './htc_x101-64x4d_fpn_16xb1-20e_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(
type='RandomResize',
scale=[(1600, 400), (1600, 1400)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './htc_x101_64x4d_fpn_16x1_20e_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(
type='RandomResize',
scale=[(1600, 400), (1600, 1400)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'annotations/panoptic_train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['PQ'])
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'annotations/panoptic_train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['PQ'])
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import ( # noqa
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.embedding.jax_array import JaxArrayEmbedding
from docarray.typing.tensor.jaxarray import JaxArray # noqa: F401
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.embedding.torch import TorchEmbedding
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.embedding.tensorflow import TensorFlowEmbedding
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="AnyEmbedding")
class AnyEmbedding(AnyTensor, EmbeddingMixin):
"""
Represents an embedding tensor object that can be used with TensorFlow, PyTorch, and NumPy type.
---
'''python
from docarray import BaseDoc
from docarray.typing import AnyEmbedding
class MyEmbeddingDoc(BaseDoc):
embedding: AnyEmbedding
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyEmbeddingDoc(embedding=tf.zeros(1000, 2))
type(doc.embedding) # TensorFlowEmbedding
# Example usage with PyTorch:
import torch
doc = MyEmbeddingDoc(embedding=torch.zeros(1000, 2))
type(doc.embedding) # TorchEmbedding
# Example usage with NumPy:
import numpy as np
doc = MyEmbeddingDoc(embedding=np.zeros((1000, 2)))
type(doc.embedding) # NdArrayEmbedding
'''
---
Raises:
TypeError: If the type of the value is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray]
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(TorchEmbedding, value)
elif isinstance(value, torch.Tensor):
return TorchEmbedding._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(TensorFlowEmbedding, value)
elif isinstance(value, tf.Tensor):
return TensorFlowEmbedding._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(JaxArrayEmbedding, value)
elif isinstance(value, jnp.ndarray):
return JaxArrayEmbedding._docarray_from_native(value) # noqa
try:
return NdArrayEmbedding.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
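# Hedged usage sketch mirroring the docstring above: the concrete embedding type
# is chosen by AnyEmbedding.validate based on what is assigned. Imports are kept
# inside the function to avoid circular imports at module load time.
def _example_any_embedding_usage() -> None:
    from docarray import BaseDoc
    from docarray.typing import AnyEmbedding

    class _EmbeddingDoc(BaseDoc):
        embedding: AnyEmbedding

    doc = _EmbeddingDoc(embedding=np.zeros((128,)))
    print(type(doc.embedding))  # NdArrayEmbedding for a plain numpy array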
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available # noqa
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.embedding.torch import TorchEmbedding
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.embedding.tensorflow import TensorFlowEmbedding
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="AnyEmbedding")
class AnyEmbedding(AnyTensor, EmbeddingMixin):
"""
Represents an embedding tensor object that can be used with TensorFlow, PyTorch, and NumPy type.
---
'''python
from docarray import BaseDoc
from docarray.typing import AnyEmbedding
class MyEmbeddingDoc(BaseDoc):
embedding: AnyEmbedding
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyEmbeddingDoc(embedding=tf.zeros(1000, 2))
type(doc.embedding) # TensorFlowEmbedding
# Example usage with PyTorch:
import torch
doc = MyEmbeddingDoc(embedding=torch.zeros(1000, 2))
type(doc.embedding) # TorchEmbedding
# Example usage with NumPy:
import numpy as np
doc = MyEmbeddingDoc(embedding=np.zeros((1000, 2)))
type(doc.embedding) # NdArrayEmbedding
'''
---
Raises:
TypeError: If the type of the value is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray]
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(TorchEmbedding, value)
elif isinstance(value, torch.Tensor):
return TorchEmbedding._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(TensorFlowEmbedding, value)
elif isinstance(value, tf.Tensor):
return TensorFlowEmbedding._docarray_from_native(value) # noqa
try:
return NdArrayEmbedding.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
import argparse
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.func import functional_call, grad_and_value, stack_module_state, vmap
# Adapted from http://willwhitney.com/parallel-training-jax.html , which is a
# tutorial on Model Ensembling with JAX by Will Whitney.
#
# The original code comes with the following citation:
# @misc{Whitney2021Parallelizing,
# author = {William F. Whitney},
# title = { {Parallelizing neural networks on one GPU with JAX} },
# year = {2021},
# url = {http://willwhitney.com/parallel-training-jax.html},
# }
# GOAL: Demonstrate that it is possible to use eager-mode vmap
# to parallelize training over models.
parser = argparse.ArgumentParser(description="Functorch Ensembled Models")
parser.add_argument(
"--device",
type=str,
default="cpu",
help="CPU or GPU ID for this process (default: 'cpu')",
)
args = parser.parse_args()
DEVICE = args.device
# Step 1: Make some spirals
def make_spirals(n_samples, noise_std=0.0, rotations=1.0):
ts = torch.linspace(0, 1, n_samples, device=DEVICE)
rs = ts**0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,), device=DEVICE) * 2 - 1
labels = (signs > 0).to(torch.long).to(DEVICE)
xs = (
rs * signs * torch.cos(thetas)
+ torch.randn(n_samples, device=DEVICE) * noise_std
)
ys = (
rs * signs * torch.sin(thetas)
+ torch.randn(n_samples, device=DEVICE) * noise_std
)
points = torch.stack([xs, ys], dim=1)
return points, labels
points, labels = make_spirals(100, noise_std=0.05)
# Step 2: Define two-layer MLP and loss function
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn = nn.NLLLoss()
model = MLPClassifier().to(DEVICE)
def train_step_fn(weights, batch, targets, lr=0.2):
def compute_loss(weights, batch, targets):
output = functional_call(model, weights, batch)
loss = loss_fn(output, targets)
return loss
grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)
# NB: PyTorch is missing a "functional optimizer API" (possibly coming soon)
# so we are going to re-implement SGD here.
new_weights = {}
with torch.no_grad():
for key in grad_weights:
new_weights[key] = weights[key] - grad_weights[key] * lr
return loss, new_weights
# Step 4: Let's verify this actually trains.
# We should see the loss decrease.
def step4():
global weights
for i in range(2000):
loss, weights = train_step_fn(dict(model.named_parameters()), points, labels)
if i % 100 == 0:
print(loss)
step4()
# Step 5: We're ready for multiple models. Let's define an init_fn
# that, given a number of models, returns to us all of the weights.
def init_fn(num_models):
models = [MLPClassifier().to(DEVICE) for _ in range(num_models)]
params, _ = stack_module_state(models)
return params
# Step 6: Now, can we try multiple models at the same time?
# The answer is: yes! `loss` is a 2-tuple, and we can see that the value keeps
# on decreasing
def step6():
parallel_train_step_fn = vmap(train_step_fn, in_dims=(0, None, None))
batched_weights = init_fn(num_models=2)
for i in range(2000):
loss, batched_weights = parallel_train_step_fn(batched_weights, points, labels)
if i % 200 == 0:
print(loss)
step6()
# Step 7: Now, the flaw with step 6 is that we were training on the same exact
# data. This can lead to all of the models in the ensemble overfitting in the
# same way. The solution that http://willwhitney.com/parallel-training-jax.html
# applies is to randomly subset the data in a way that the models do not receive
# exactly the same data in each training step!
# Because the goal of this doc is to show that we can use eager-mode vmap to
# achieve similar things as JAX, the rest of this is left as an exercise to the reader.
# In conclusion, to achieve what http://willwhitney.com/parallel-training-jax.html
# does, we used the following additional items that PyTorch does not have:
# 1. NN module functional API that turns a module into a (state, state_less_fn) pair
# 2. Functional optimizers
# 3. A "functional" grad API (that effectively wraps autograd.grad)
# 4. Composability between the functional grad API and torch.vmap.
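# Hedged sketch of the Step 7 exercise above (batch size and model count are
# arbitrary): give every ensemble member its own random minibatch and vmap over
# (weights, batch, targets) so the models no longer see identical data.
def sample_minibatches(num_models, batch_size):
    idx = torch.randint(0, points.shape[0], (num_models, batch_size), device=DEVICE)
    return points[idx], labels[idx]  # shapes (M, B, 2) and (M, B)


def step7(num_models=2, batch_size=32):
    parallel_train_step_fn = vmap(train_step_fn, in_dims=(0, 0, 0))
    batched_weights = init_fn(num_models=num_models)
    for i in range(2000):
        minibatch, minibatch_labels = sample_minibatches(num_models, batch_size)
        loss, batched_weights = parallel_train_step_fn(
            batched_weights, minibatch, minibatch_labels
        )
        if i % 200 == 0:
            print(loss)


step7()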
|
import argparse
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.func import functional_call, grad_and_value, stack_module_state, vmap
# Adapted from http://willwhitney.com/parallel-training-jax.html , which is a
# tutorial on Model Ensembling with JAX by Will Whitney.
#
# The original code comes with the following citation:
# @misc{Whitney2021Parallelizing,
# author = {William F. Whitney},
# title = { {Parallelizing neural networks on one GPU with JAX} },
# year = {2021},
# url = {http://willwhitney.com/parallel-training-jax.html},
# }
# GOAL: Demonstrate that it is possible to use eager-mode vmap
# to parallelize training over models.
parser = argparse.ArgumentParser(description="Functorch Ensembled Models")
parser.add_argument(
"--device",
type=str,
default="cpu",
help="CPU or GPU ID for this process (default: 'cpu')",
)
args = parser.parse_args()
DEVICE = args.device
# Step 1: Make some spirals
def make_spirals(n_samples, noise_std=0.0, rotations=1.0):
ts = torch.linspace(0, 1, n_samples, device=DEVICE)
rs = ts**0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,), device=DEVICE) * 2 - 1
labels = (signs > 0).to(torch.long).to(DEVICE)
xs = (
rs * signs * torch.cos(thetas)
+ torch.randn(n_samples, device=DEVICE) * noise_std
)
ys = (
rs * signs * torch.sin(thetas)
+ torch.randn(n_samples, device=DEVICE) * noise_std
)
points = torch.stack([xs, ys], dim=1)
return points, labels
points, labels = make_spirals(100, noise_std=0.05)
# Step 2: Define two-layer MLP and loss function
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn = nn.NLLLoss()
model = MLPClassifier().to(DEVICE)
def train_step_fn(weights, batch, targets, lr=0.2):
def compute_loss(weights, batch, targets):
output = functional_call(model, weights, batch)
loss = loss_fn(output, targets)
return loss
grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)
# NB: PyTorch is missing a "functional optimizer API" (possibly coming soon)
# so we are going to re-implement SGD here.
new_weights = {}
with torch.no_grad():
for key in grad_weights:
new_weights[key] = weights[key] - grad_weights[key] * lr
return loss, new_weights
# Step 4: Let's verify this actually trains.
# We should see the loss decrease.
def step4():
global weights
for i in range(2000):
loss, weights = train_step_fn(dict(model.named_parameters()), points, labels)
if i % 100 == 0:
print(loss)
step4()
# Step 5: We're ready for multiple models. Let's define an init_fn
# that, given a number of models, returns to us all of the weights.
def init_fn(num_models):
models = [MLPClassifier().to(DEVICE) for _ in range(num_models)]
params, _ = stack_module_state(models)
return params
# Step 6: Now, can we try multiple models at the same time?
# The answer is: yes! `loss` is a 2-tuple, and we can see that the value keeps
# on decreasing
def step6():
parallel_train_step_fn = vmap(train_step_fn, in_dims=(0, None, None))
batched_weights = init_fn(num_models=2)
for i in range(2000):
loss, batched_weights = parallel_train_step_fn(batched_weights, points, labels)
if i % 200 == 0:
print(loss)
step6()
# Step 7: Now, the flaw with step 6 is that we were training on the same exact
# data. This can lead to all of the models in the ensemble overfitting in the
# same way. The solution that http://willwhitney.com/parallel-training-jax.html
# applies is to randomly subset the data in a way that the models do not receive
# exactly the same data in each training step!
# Because the goal of this doc is to show that we can use eager-mode vmap to
# achieve similar things as JAX, the rest of this is left as an exercise to the reader.
# In conclusion, to achieve what http://willwhitney.com/parallel-training-jax.html
# does, we used the following additional items that PyTorch does not have:
# 1. NN module functional API that turns a module into a (state, state_less_fn) pair
# 2. Functional optimizers
# 3. A "functional" grad API (that effectively wraps autograd.grad)
# 4. Composability between the functional grad API and torch.vmap.
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
# set multi-process start method as `fork` to speed up the training
if platform.system() != 'Windows':
mp_start_method = cfg.get('mp_start_method', 'fork')
current_method = mp.get_start_method(allow_none=True)
if current_method is not None and current_method != mp_start_method:
warnings.warn(
f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`. '
                f'It will be forcibly set to `{mp_start_method}`. You can change '
f'this behavior by changing `mp_start_method` in your config.')
mp.set_start_method(mp_start_method, force=True)
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = cfg.get('opencv_num_threads', 0)
cv2.setNumThreads(opencv_num_threads)
# setup OMP threads
    # This code is adapted from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa
workers_per_gpu = cfg.data.get('workers_per_gpu', 1)
if 'train_dataloader' in cfg.data:
workers_per_gpu = \
max(cfg.data.train_dataloader.get('workers_per_gpu', 1),
workers_per_gpu)
if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
omp_num_threads = 1
warnings.warn(
f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} by default, to avoid your system being '
            f'overloaded. Please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
# setup MKL threads
if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
mkl_num_threads = 1
warnings.warn(
f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} by default, to avoid your system being '
            f'overloaded. Please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
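# A minimal, hypothetical usage sketch (not part of the original module): build a
# config carrying the keys read above and apply the environment setup. `Config`
# from mmcv is assumed to be available; any dict-like object exposing `.get` and
# a `data` attribute would behave the same way.
if __name__ == '__main__':
    from mmcv import Config
    cfg = Config(
        dict(
            mp_start_method='fork',
            opencv_num_threads=0,
            data=dict(
                workers_per_gpu=2,
                train_dataloader=dict(workers_per_gpu=4))))
    setup_multi_processes(cfg)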
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
# set multi-process start method as `fork` to speed up the training
if platform.system() != 'Windows':
mp_start_method = cfg.get('mp_start_method', 'fork')
current_method = mp.get_start_method(allow_none=True)
if current_method is not None and current_method != mp_start_method:
warnings.warn(
f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`. '
                f'It will be forcibly set to `{mp_start_method}`. You can change '
f'this behavior by changing `mp_start_method` in your config.')
mp.set_start_method(mp_start_method, force=True)
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = cfg.get('opencv_num_threads', 0)
cv2.setNumThreads(opencv_num_threads)
# setup OMP threads
    # This code is adapted from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa
if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
omp_num_threads = 1
warnings.warn(
f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} by default, to avoid your system being '
            f'overloaded. Please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
# setup MKL threads
if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
mkl_num_threads = 1
warnings.warn(
f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} by default, to avoid your system being '
            f'overloaded. Please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import GFLHead
class TestGFLHead(TestCase):
def test_gfl_head_loss(self):
"""Tests gfl head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
train_cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
gfl_head = GFLHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = gfl_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_dfl_loss.item(), 0,
'there should be no dfl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_dfl_loss.item(), 0,
'dfl loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import GFLHead
class TestGFLHead(TestCase):
def test_gfl_head_loss(self):
"""Tests gfl head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
train_cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
gfl_head = GFLHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = gfl_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_dfl_loss.item(), 0,
'there should be no dfl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_dfl_loss.item(), 0,
'dfl loss should be non-zero')
|
__version__ = '0.16.6'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.16.5'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["transformer_flux"] = ["FluxTransformer2DLoadersMixin"]
_import_structure["transformer_sd3"] = ["SD3Transformer2DLoadersMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
"HunyuanVideoLoraLoaderMixin",
"SanaLoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = [
"IPAdapterMixin",
"FluxIPAdapterMixin",
"SD3IPAdapterMixin",
]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .transformer_flux import FluxTransformer2DLoadersMixin
from .transformer_sd3 import SD3Transformer2DLoadersMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import (
FluxIPAdapterMixin,
IPAdapterMixin,
SD3IPAdapterMixin,
)
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
HunyuanVideoLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SanaLoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["transformer_sd3"] = ["SD3Transformer2DLoadersMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
"HunyuanVideoLoraLoaderMixin",
"SanaLoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = [
"IPAdapterMixin",
"SD3IPAdapterMixin",
]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .transformer_sd3 import SD3Transformer2DLoadersMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import (
IPAdapterMixin,
SD3IPAdapterMixin,
)
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
HunyuanVideoLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SanaLoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.postgres import PostgresKVStore
class PostgresDocumentStore(KVDocumentStore):
"""
Postgres Document (Node) store.
A Postgres store for Document and Node objects.
Args:
postgres_kvstore (PostgresKVStore): Postgres key-value store
namespace (str): namespace for the docstore
batch_size (int): batch size for bulk operations
"""
def __init__(
self,
postgres_kvstore: PostgresKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a PostgresDocumentStore."""
super().__init__(postgres_kvstore, namespace=namespace, batch_size=batch_size)
@classmethod
def from_uri(
cls,
uri: str,
namespace: Optional[str] = None,
table_name: str = "docstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
) -> "PostgresDocumentStore":
"""Load a PostgresDocumentStore from a Postgres URI."""
postgres_kvstore = PostgresKVStore.from_uri(
uri=uri,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
return cls(postgres_kvstore, namespace)
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
namespace: Optional[str] = None,
table_name: str = "docstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
) -> "PostgresDocumentStore":
"""Load a PostgresDocumentStore from a Postgres host and port."""
postgres_kvstore = PostgresKVStore.from_params(
host=host,
port=port,
database=database,
user=user,
password=password,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
return cls(postgres_kvstore, namespace)
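# A hypothetical usage sketch (URI, database and namespace below are made up for
# illustration):
#
#     docstore = PostgresDocumentStore.from_uri(
#         uri="postgresql://user:password@localhost:5432/llama_index_db",
#         namespace="my_docs",
#     )
#
# The resulting object behaves like any other KVDocumentStore, so nodes can be
# added to and fetched from Postgres through the usual docstore interface.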
|
from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.postgres import PostgresKVStore
class PostgresDocumentStore(KVDocumentStore):
"""Postgres Document (Node) store.
A Postgres store for Document and Node objects.
Args:
postgres_kvstore (PostgresKVStore): Postgres key-value store
namespace (str): namespace for the docstore
batch_size (int): batch size for bulk operations
"""
def __init__(
self,
postgres_kvstore: PostgresKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a PostgresDocumentStore."""
super().__init__(postgres_kvstore, namespace=namespace, batch_size=batch_size)
@classmethod
def from_uri(
cls,
uri: str,
namespace: Optional[str] = None,
table_name: str = "docstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
) -> "PostgresDocumentStore":
"""Load a PostgresDocumentStore from a Postgres URI."""
postgres_kvstore = PostgresKVStore.from_uri(
uri=uri,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
return cls(postgres_kvstore, namespace)
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
namespace: Optional[str] = None,
table_name: str = "docstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
) -> "PostgresDocumentStore":
"""Load a PostgresDocumentStore from a Postgres host and port."""
postgres_kvstore = PostgresKVStore.from_params(
host=host,
port=port,
database=database,
user=user,
password=password,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
return cls(postgres_kvstore, namespace)
|
"""Testing code shared by other tests."""
# pylint: disable=invalid-name
import collections
import importlib.util
import json
import os
import tempfile
from typing import Any, Callable, Dict, Type
import numpy as np
import xgboost as xgb
from xgboost._typing import ArrayLike
def validate_leaf_output(leaf: np.ndarray, num_parallel_tree: int) -> None:
"""Validate output for predict leaf tests."""
for i in range(leaf.shape[0]): # n_samples
for j in range(leaf.shape[1]): # n_rounds
for k in range(leaf.shape[2]): # n_classes
tree_group = leaf[i, j, k, :]
assert tree_group.shape[0] == num_parallel_tree
# No sampling, all trees within forest are the same
assert np.all(tree_group == tree_group[0])
def validate_data_initialization(
dmatrix: Type, model: Type[xgb.XGBModel], X: ArrayLike, y: ArrayLike
) -> None:
"""Assert that we don't create duplicated DMatrix."""
old_init = dmatrix.__init__
count = [0]
def new_init(self: Any, **kwargs: Any) -> Callable:
count[0] += 1
return old_init(self, **kwargs)
dmatrix.__init__ = new_init
model(n_estimators=1).fit(X, y, eval_set=[(X, y)])
assert count[0] == 1
count[0] = 0 # only 1 DMatrix is created.
y_copy = y.copy()
model(n_estimators=1).fit(X, y, eval_set=[(X, y_copy)])
assert count[0] == 2 # a different Python object is considered different
dmatrix.__init__ = old_init
# pylint: disable=too-many-arguments,too-many-locals
def get_feature_weights(
*,
X: ArrayLike,
y: ArrayLike,
fw: np.ndarray,
parser_path: str,
tree_method: str,
model: Type[xgb.XGBModel] = xgb.XGBRegressor,
) -> np.ndarray:
"""Get feature weights using the demo parser."""
with tempfile.TemporaryDirectory() as tmpdir:
colsample_bynode = 0.5
reg = model(tree_method=tree_method, colsample_bynode=colsample_bynode)
reg.fit(X, y, feature_weights=fw)
model_path = os.path.join(tmpdir, "model.json")
reg.save_model(model_path)
with open(model_path, "r", encoding="utf-8") as fd:
model = json.load(fd)
spec = importlib.util.spec_from_file_location("JsonParser", parser_path)
assert spec is not None
jsonm = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(jsonm)
model = jsonm.Model(model)
splits: Dict[int, int] = {}
total_nodes = 0
for tree in model.trees:
n_nodes = len(tree.nodes)
total_nodes += n_nodes
for n in range(n_nodes):
if tree.is_leaf(n):
continue
if splits.get(tree.split_index(n), None) is None:
splits[tree.split_index(n)] = 1
else:
splits[tree.split_index(n)] += 1
od = collections.OrderedDict(sorted(splits.items()))
tuples = list(od.items())
k, v = list(zip(*tuples))
w = np.polyfit(k, v, deg=1)
return w
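# A hypothetical usage sketch (data shapes, the parser path and the tree method
# are assumptions made for illustration, not taken from this test suite):
#
#     rng = np.random.default_rng(0)
#     X, y = rng.random((128, 8)), rng.random(128)
#     fw = np.arange(1, 9, dtype=float)  # monotonically increasing feature weights
#     slope, intercept = get_feature_weights(
#         X=X, y=y, fw=fw,
#         parser_path="demo/json-model/json_parser.py",
#         tree_method="hist",
#     )
#     assert slope > 0  # heavier-weighted features should be chosen for more splits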
|
"""Testing code shared by other tests."""
# pylint: disable=invalid-name
import collections
import importlib.util
import json
import os
import tempfile
from typing import Any, Callable, Dict, Type
import numpy as np
import xgboost as xgb
from xgboost._typing import ArrayLike
def validate_leaf_output(leaf: np.ndarray, num_parallel_tree: int) -> None:
"""Validate output for predict leaf tests."""
for i in range(leaf.shape[0]): # n_samples
for j in range(leaf.shape[1]): # n_rounds
for k in range(leaf.shape[2]): # n_classes
tree_group = leaf[i, j, k, :]
assert tree_group.shape[0] == num_parallel_tree
# No sampling, all trees within forest are the same
assert np.all(tree_group == tree_group[0])
def validate_data_initialization(
dmatrix: Type, model: Type[xgb.XGBModel], X: ArrayLike, y: ArrayLike
) -> None:
"""Assert that we don't create duplicated DMatrix."""
old_init = dmatrix.__init__
count = [0]
def new_init(self: Any, **kwargs: Any) -> Callable:
count[0] += 1
return old_init(self, **kwargs)
dmatrix.__init__ = new_init
model(n_estimators=1).fit(X, y, eval_set=[(X, y)])
assert count[0] == 1
count[0] = 0 # only 1 DMatrix is created.
y_copy = y.copy()
model(n_estimators=1).fit(X, y, eval_set=[(X, y_copy)])
assert count[0] == 2 # a different Python object is considered different
dmatrix.__init__ = old_init
# pylint: disable=too-many-arguments,too-many-locals
def get_feature_weights(
X: ArrayLike,
y: ArrayLike,
fw: np.ndarray,
parser_path: str,
tree_method: str,
model: Type[xgb.XGBModel] = xgb.XGBRegressor,
) -> np.ndarray:
"""Get feature weights using the demo parser."""
with tempfile.TemporaryDirectory() as tmpdir:
colsample_bynode = 0.5
reg = model(tree_method=tree_method, colsample_bynode=colsample_bynode)
reg.fit(X, y, feature_weights=fw)
model_path = os.path.join(tmpdir, "model.json")
reg.save_model(model_path)
with open(model_path, "r", encoding="utf-8") as fd:
model = json.load(fd)
spec = importlib.util.spec_from_file_location("JsonParser", parser_path)
assert spec is not None
jsonm = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(jsonm)
model = jsonm.Model(model)
splits: Dict[int, int] = {}
total_nodes = 0
for tree in model.trees:
n_nodes = len(tree.nodes)
total_nodes += n_nodes
for n in range(n_nodes):
if tree.is_leaf(n):
continue
if splits.get(tree.split_index(n), None) is None:
splits[tree.split_index(n)] = 1
else:
splits[tree.split_index(n)] += 1
od = collections.OrderedDict(sorted(splits.items()))
tuples = list(od.items())
k, v = list(zip(*tuples))
w = np.polyfit(k, v, deg=1)
return w
|
import os
import socket
from typing import Optional, TYPE_CHECKING
def get_docker_network(client) -> Optional[str]:
"""Do a best-effort guess if the caller is already in a docker network
Check if `hostname` exists in list of docker containers.
If a container is found, check its network id
:param client: docker client object
:return: network id if exists
"""
import docker
if TYPE_CHECKING: # pragma: no cover
from docker.models.containers import Container
container: 'Container' = None
try:
hostname = socket.gethostname()
container = client.containers.get(hostname)
except docker.errors.NotFound:
try:
# https://stackoverflow.com/a/52988227/15683245
with open('/proc/1/cpuset') as f:
hostname = os.path.basename(f.read().rstrip())
container = client.containers.get(hostname)
except Exception:
return None
try:
networks = container.attrs['NetworkSettings']['Networks']
if networks:
net_mode = list(networks.keys())[0]
return networks[net_mode]['NetworkID']
else:
return None
except Exception:
return None
def get_gpu_device_requests(gpu_args):
"""Get docker device requests from gpu args
:param gpu_args: gpu args fr
:return: docker device requests
"""
import docker
_gpus = {
'count': 0,
'capabilities': ['gpu'],
'device': [],
'driver': '',
}
for gpu_arg in gpu_args.split(','):
if gpu_arg == 'all':
_gpus['count'] = -1
if gpu_arg.isdigit():
_gpus['count'] = int(gpu_arg)
if '=' in gpu_arg:
gpu_arg_key, gpu_arg_value = gpu_arg.split('=')
if gpu_arg_key in _gpus.keys():
if isinstance(_gpus[gpu_arg_key], list):
_gpus[gpu_arg_key].append(gpu_arg_value)
else:
_gpus[gpu_arg_key] = gpu_arg_value
device_requests = [
docker.types.DeviceRequest(
count=_gpus['count'],
driver=_gpus['driver'],
device_ids=_gpus['device'],
capabilities=[_gpus['capabilities']],
)
]
return device_requests
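# A hypothetical usage sketch of the parsing above (argument strings are made up):
#
#     get_gpu_device_requests('all')    # count=-1, i.e. expose all GPUs
#     get_gpu_device_requests('2')      # count=2, expose two GPUs
#     get_gpu_device_requests('device=0,device=1,driver=nvidia')  # explicit ids/driver
#
# Each call returns a one-element list of docker.types.DeviceRequest that can be
# passed to `client.containers.run(..., device_requests=...)`.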
|
import os
import socket
from typing import Optional, TYPE_CHECKING
def get_docker_network(client) -> Optional[str]:
"""Do a best-effort guess if the caller is already in a docker network
Check if `hostname` exists in list of docker containers.
If a container is found, check its network id
:param client: docker client object
:return: network id if exists
"""
import docker
if TYPE_CHECKING:
from docker.models.containers import Container
container: 'Container' = None
try:
hostname = socket.gethostname()
container = client.containers.get(hostname)
except docker.errors.NotFound:
try:
# https://stackoverflow.com/a/52988227/15683245
with open('/proc/1/cpuset') as f:
hostname = os.path.basename(f.read().rstrip())
container = client.containers.get(hostname)
except Exception:
return None
try:
networks = container.attrs['NetworkSettings']['Networks']
if networks:
net_mode = list(networks.keys())[0]
return networks[net_mode]['NetworkID']
else:
return None
except Exception:
return None
def get_gpu_device_requests(gpu_args):
"""Get docker device requests from gpu args
:param gpu_args: gpu args fr
:return: docker device requests
"""
import docker
_gpus = {
'count': 0,
'capabilities': ['gpu'],
'device': [],
'driver': '',
}
for gpu_arg in gpu_args.split(','):
if gpu_arg == 'all':
_gpus['count'] = -1
if gpu_arg.isdigit():
_gpus['count'] = int(gpu_arg)
if '=' in gpu_arg:
gpu_arg_key, gpu_arg_value = gpu_arg.split('=')
if gpu_arg_key in _gpus.keys():
if isinstance(_gpus[gpu_arg_key], list):
_gpus[gpu_arg_key].append(gpu_arg_value)
else:
_gpus[gpu_arg_key] = gpu_arg_value
device_requests = [
docker.types.DeviceRequest(
count=_gpus['count'],
driver=_gpus['driver'],
device_ids=_gpus['device'],
capabilities=[_gpus['capabilities']],
)
]
return device_requests
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import DoctranPropertyExtractor
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DoctranPropertyExtractor": "langchain_community.document_transformers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DoctranPropertyExtractor",
]
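# A short, hypothetical illustration of the redirect above: importing
# `DoctranPropertyExtractor` from this module resolves the class from
# `langchain_community.document_transformers` via `_import_attribute` and emits
# a deprecation warning pointing at the new location. Because the lookup happens
# in `__getattr__`, a missing community package only raises when the attribute
# is actually accessed, not at module import time.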
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import DoctranPropertyExtractor
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DoctranPropertyExtractor": "langchain_community.document_transformers"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DoctranPropertyExtractor",
]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
file_client_args = dict(backend='disk')
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=image_size, ratio_range=(0.1, 2.0)),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
file_client_args = dict(backend='disk')
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=image_size, ratio_range=(0.1, 2.0)),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
import logging
import os
import numpy as np
import pytest
from docarray.index import MongoDBAtlasDocumentIndex
from . import NestedDoc, SimpleDoc, SimpleSchema
@pytest.fixture(scope='session')
def mongodb_index_config():
return {
"mongo_connection_uri": os.environ["MONGODB_URI"],
"database_name": os.environ["MONGODB_DATABASE"],
}
@pytest.fixture
def simple_index(mongodb_index_config):
index = MongoDBAtlasDocumentIndex[SimpleSchema](
index_name="bespoke_name", **mongodb_index_config
)
return index
@pytest.fixture
def nested_index(mongodb_index_config):
index = MongoDBAtlasDocumentIndex[NestedDoc](**mongodb_index_config)
return index
@pytest.fixture(scope='module')
def n_dim():
return 10
@pytest.fixture(scope='module')
def embeddings(n_dim):
"""A consistent, reasonable, mock of vector embeddings, in [-1, 1]."""
x = np.linspace(-np.pi, np.pi, n_dim)
y = np.arange(n_dim)
return np.sin(x[np.newaxis, :] + y[:, np.newaxis])
@pytest.fixture(scope='module')
def random_simple_documents(n_dim, embeddings):
docs_text = [
"Text processing with Python is a valuable skill for data analysis.",
"Gardening tips for a beautiful backyard oasis.",
"Explore the wonders of deep-sea diving in tropical locations.",
"The history and art of classical music compositions.",
"An introduction to the world of gourmet cooking.",
"Integer pharetra, leo quis aliquam hendrerit, arcu ante sagittis massa, nec tincidunt arcu.",
"Sed luctus convallis velit sit amet laoreet. Morbi sit amet magna pellentesque urna tincidunt",
"luctus enim interdum lacinia. Morbi maximus diam id justo egestas pellentesque. Suspendisse",
"id laoreet odio gravida vitae. Vivamus feugiat nisi quis est pellentesque interdum. Integer",
"eleifend eros non, accumsan lectus. Curabitur porta auctor tellus at pharetra. Phasellus ut condimentum",
]
return [
SimpleSchema(embedding=embeddings[i], number=i, text=docs_text[i])
for i in range(len(docs_text))
]
@pytest.fixture
def nested_documents(n_dim):
docs = [
NestedDoc(
d=SimpleDoc(embedding=np.random.rand(n_dim)),
embedding=np.random.rand(n_dim),
)
for _ in range(10)
]
docs.append(
NestedDoc(
d=SimpleDoc(embedding=np.zeros(n_dim)),
embedding=np.ones(n_dim),
)
)
docs.append(
NestedDoc(
d=SimpleDoc(embedding=np.ones(n_dim)),
embedding=np.zeros(n_dim),
)
)
docs.append(
NestedDoc(
d=SimpleDoc(embedding=np.zeros(n_dim)),
embedding=np.ones(n_dim),
)
)
return docs
@pytest.fixture
def simple_index_with_docs(simple_index, random_simple_documents):
"""
Setup and teardown of simple_index. Accesses the underlying MongoDB collection directly.
"""
simple_index._collection.delete_many({})
simple_index._logger.setLevel(logging.DEBUG)
simple_index.index(random_simple_documents)
yield simple_index, random_simple_documents
simple_index._collection.delete_many({})
@pytest.fixture
def nested_index_with_docs(nested_index, nested_documents):
"""
    Setup and teardown of nested_index. Accesses the underlying MongoDB collection directly.
"""
nested_index._collection.delete_many({})
nested_index.index(nested_documents)
yield nested_index, nested_documents
nested_index._collection.delete_many({})
|
import os
import numpy as np
import pytest
from docarray.index import MongoDBAtlasDocumentIndex
from . import NestedDoc, SimpleDoc, SimpleSchema
@pytest.fixture(scope='session')
def mongodb_index_config():
return {
"mongo_connection_uri": os.environ["MONGODB_URI"],
"database_name": os.environ["MONGODB_DATABASE"],
}
@pytest.fixture
def simple_index(mongodb_index_config):
index = MongoDBAtlasDocumentIndex[SimpleSchema](**mongodb_index_config)
return index
@pytest.fixture
def nested_index(mongodb_index_config):
index = MongoDBAtlasDocumentIndex[NestedDoc](**mongodb_index_config)
return index
@pytest.fixture(scope='module')
def random_simple_documents():
N_DIM = 10
docs_text = [
"Text processing with Python is a valuable skill for data analysis.",
"Gardening tips for a beautiful backyard oasis.",
"Explore the wonders of deep-sea diving in tropical locations.",
"The history and art of classical music compositions.",
"An introduction to the world of gourmet cooking.",
"Integer pharetra, leo quis aliquam hendrerit, arcu ante sagittis massa, nec tincidunt arcu.",
"Sed luctus convallis velit sit amet laoreet. Morbi sit amet magna pellentesque urna tincidunt",
"luctus enim interdum lacinia. Morbi maximus diam id justo egestas pellentesque. Suspendisse",
"id laoreet odio gravida vitae. Vivamus feugiat nisi quis est pellentesque interdum. Integer",
"eleifend eros non, accumsan lectus. Curabitur porta auctor tellus at pharetra. Phasellus ut condimentum",
]
return [
SimpleSchema(embedding=np.random.rand(N_DIM), number=i, text=docs_text[i])
for i in range(10)
]
@pytest.fixture
def nested_documents():
N_DIM = 10
docs = [
NestedDoc(
d=SimpleDoc(embedding=np.random.rand(N_DIM)),
embedding=np.random.rand(N_DIM),
)
for _ in range(10)
]
docs.append(
NestedDoc(
d=SimpleDoc(embedding=np.zeros(N_DIM)),
embedding=np.ones(N_DIM),
)
)
docs.append(
NestedDoc(
d=SimpleDoc(embedding=np.ones(N_DIM)),
embedding=np.zeros(N_DIM),
)
)
docs.append(
NestedDoc(
d=SimpleDoc(embedding=np.zeros(N_DIM)),
embedding=np.ones(N_DIM),
)
)
return docs
@pytest.fixture
def simple_index_with_docs(simple_index, random_simple_documents):
"""
Setup and teardown of simple_index. Accesses the underlying MongoDB collection directly.
"""
simple_index._doc_collection.delete_many({})
simple_index.index(random_simple_documents)
yield simple_index, random_simple_documents
simple_index._doc_collection.delete_many({})
@pytest.fixture
def nested_index_with_docs(nested_index, nested_documents):
"""
    Setup and teardown of nested_index. Accesses the underlying MongoDB collection directly.
"""
nested_index._doc_collection.delete_many({})
nested_index.index(nested_documents)
yield nested_index, nested_documents
nested_index._doc_collection.delete_many({})
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import torch
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOV3(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
outs = self.bbox_head.forward(x)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
outs = self.bbox_head.forward(x)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataSample]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/time', time.time() - self.t)
self.t = time.time()
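# A hypothetical usage sketch: with an mmengine-style runner this hook is
# typically enabled through the hook registry, e.g. in a config file
#
#     custom_hooks = [dict(type='IterTimerHook')]
#
# after which the runner's message hub exposes the recorded `train/data_time`
# and `train/time` values to downstream logging hooks.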
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
    E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
data_batch: DATA_BATCH = None,
                    outputs: Optional[Union[dict,
                                            Sequence[BaseDataSample]]] = None,
                    mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
"""Human message."""
from typing import Any, Literal, Union
from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class HumanMessage(BaseMessage):
"""Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
"""
example: bool = False
"""Use to denote that a message is part of an example conversation.
At the moment, this is ignored by most models. Usage is discouraged.
Defaults to False.
"""
type: Literal["human"] = "human"
"""The type of the message (used for serialization). Defaults to "human"."""
def __init__(
self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg.
Args:
content: The string contents of the message.
kwargs: Additional fields to pass to the message.
"""
super().__init__(content=content, **kwargs)
class HumanMessageChunk(HumanMessage, BaseMessageChunk):
"""Human Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "HumanMessageChunk"."""
|
"""Human message."""
from typing import Any, Literal, Union
from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class HumanMessage(BaseMessage):
"""Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
"""
example: bool = False
"""Use to denote that a message is part of an example conversation.
At the moment, this is ignored by most models. Usage is discouraged.
Defaults to False.
"""
type: Literal["human"] = "human"
"""The type of the message (used for serialization). Defaults to "human"."""
def __init__(
self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg.
Args:
content: The string contents of the message.
kwargs: Additional fields to pass to the message.
"""
super().__init__(content=content, **kwargs)
HumanMessage.model_rebuild()
class HumanMessageChunk(HumanMessage, BaseMessageChunk):
"""Human Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "HumanMessageChunk"."""
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseMSEEvaluator(MSEEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
):
# Set attributes before calling super().__init__() because SparseMSEEvaluator.embed_inputs()
# is called in the superclass constructor.
self.batch_size = batch_size
self.show_progress_bar = show_progress_bar
self.name = name
self.write_csv = write_csv
self.truncate_dim = truncate_dim
super().__init__(
source_sentences=source_sentences,
target_sentences=target_sentences,
teacher_model=teacher_model,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self,
model: SparseEncoder,
output_path: str = None,
epoch: int = -1,
steps: int = -1,
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self,
model: SparseEncoder,
metrics: dict[str, Any],
epoch: int = 0,
step: int = 0,
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseMSEEvaluator(MSEEvaluator):
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
        return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sklearn.cluster import AgglomerativeClustering
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Some models don't automatically normalize the embeddings, in which case you should normalize the embeddings:
# corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Some models don't automatically normalize the embeddings, in which case you should normalize the embeddings:
# corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomHueTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomHue,
init_kwargs={
"factor": 0.75,
"value_range": (20, 200),
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_random_hue_inference(self):
seed = 3481
layer = layers.RandomHue(0.2, [0, 1.0])
np.random.seed(seed)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs, training=False)
self.assertAllClose(inputs, output)
def test_random_hue_value_range_0_to_1(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
layer = layers.RandomHue(0.2, (0, 1))
adjusted_image = layer(image)
self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))
def test_random_hue_value_range_0_to_255(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=255)
layer = layers.RandomHue(0.2, (0, 255))
adjusted_image = layer(image)
self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
self.assertTrue(keras.ops.numpy.all(adjusted_image <= 255))
def test_random_hue_no_change_with_zero_factor(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
inputs = keras.random.randint((224, 224, 3), 0, 255)
else:
inputs = keras.random.randint((3, 224, 224), 0, 255)
layer = layers.RandomHue(0, (0, 255), data_format=data_format)
output = layer(inputs, training=False)
self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)
def test_random_hue_randomness(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)[:5]
layer = layers.RandomHue(0.2, (0, 255))
adjusted_images = layer(image)
self.assertNotAllClose(adjusted_images, image)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3))
else:
input_data = np.random.random((2, 3, 8, 8))
layer = layers.RandomHue(
factor=0.5, value_range=[0, 1], data_format=data_format, seed=1337
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
|
import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomHueTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomHue,
init_kwargs={
"factor": 0.75,
"value_range": (20, 200),
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_random_hue_inference(self):
seed = 3481
layer = layers.RandomHue(0.2, [0, 1.0])
np.random.seed(seed)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs, training=False)
self.assertAllClose(inputs, output)
def test_random_hue_value_range(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
layer = layers.RandomHue(0.2, (0, 255))
adjusted_image = layer(image)
self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))
def test_random_hue_no_change_with_zero_factor(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
inputs = keras.random.randint((224, 224, 3), 0, 255)
else:
inputs = keras.random.randint((3, 224, 224), 0, 255)
layer = layers.RandomHue(0, (0, 255), data_format=data_format)
output = layer(inputs, training=False)
self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)
def test_random_hue_randomness(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)[:5]
layer = layers.RandomHue(0.2, (0, 255))
adjusted_images = layer(image)
self.assertNotAllClose(adjusted_images, image)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3))
else:
input_data = np.random.random((2, 3, 8, 8))
layer = layers.RandomHue(
factor=0.5, value_range=[0, 1], data_format=data_format, seed=1337
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
|
from typing import Protocol, Optional, runtime_checkable
@runtime_checkable
class RetryPolicy(Protocol):
def next(
self, elapsed_time: float, attempts: int, error: Exception
) -> Optional[float]:
"""
Decides if we should make another retry, returning the number of seconds to wait before the next run.
Args:
elapsed_time: Time in seconds that passed since the last attempt.
attempts: The number of attempts done so far.
            error: The last error that occurred.
Returns:
The amount of seconds to wait before the next attempt, or None if we stop retrying.
"""
class ConstantDelayRetryPolicy:
"""A simple policy that retries a step at regular intervals for a number of times."""
def __init__(self, maximum_attempts: int = 3, delay: float = 5) -> None:
"""
Creates a ConstantDelayRetryPolicy instance.
Args:
maximum_attempts: How many consecutive times the workflow should try to run the step in case of an error.
            delay: How much time in seconds must pass before another attempt.
"""
self.maximum_attempts = maximum_attempts
self.delay = delay
def next(
self, elapsed_time: float, attempts: int, error: Exception
) -> Optional[float]:
if attempts >= self.maximum_attempts:
return None
return self.delay
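# Hedged usage sketch (illustrative only, not part of the module above): drive a
# retry loop with any object satisfying the RetryPolicy protocol.
if __name__ == "__main__":
    import time

    def flaky() -> None:
        raise RuntimeError("transient failure")

    policy: RetryPolicy = ConstantDelayRetryPolicy(maximum_attempts=3, delay=0.1)
    attempts, start = 0, time.monotonic()
    while True:
        try:
            flaky()
            break
        except Exception as err:
            attempts += 1
            delay = policy.next(time.monotonic() - start, attempts, err)
            if delay is None:
                print(f"giving up after {attempts} attempts")
                break
            time.sleep(delay)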
|
from typing import Protocol, Optional, runtime_checkable
@runtime_checkable
class RetryPolicy(Protocol):
def next(
self, elapsed_time: float, attempts: int, error: Exception
) -> Optional[float]:
"""Decides if we should make another retry, returning the number of seconds to wait before the next run.
Args:
elapsed_time: Time in seconds that passed since the last attempt.
attempts: The number of attempts done so far.
            error: The last error that occurred.
Returns:
The amount of seconds to wait before the next attempt, or None if we stop retrying.
"""
class ConstantDelayRetryPolicy:
"""A simple policy that retries a step at regular intervals for a number of times."""
def __init__(self, maximum_attempts: int = 3, delay: float = 5) -> None:
"""Creates a ConstantDelayRetryPolicy instance.
Args:
maximum_attempts: How many consecutive times the workflow should try to run the step in case of an error.
            delay: How much time in seconds must pass before another attempt.
"""
self.maximum_attempts = maximum_attempts
self.delay = delay
def next(
self, elapsed_time: float, attempts: int, error: Exception
) -> Optional[float]:
if attempts >= self.maximum_attempts:
return None
return self.delay
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
import numpy as np
import torch
from mmdet.registry import DATASETS, TRANSFORMS
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
PIPELINES = TRANSFORMS
def _concat_dataset(cfg, default_args=None):
from mmengine.dataset.dataset_wrapper import ConcatDataset
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
separate_eval = cfg.get('separate_eval', True)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
# pop 'separate_eval' since it is not a valid key for common datasets.
if 'separate_eval' in data_cfg:
data_cfg.pop('separate_eval')
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval)
# TODO: Need to refactor later
def build_dataset(cfg, default_args=None):
from mmengine.dataset import ClassBalancedDataset
from .dataset_wrappers import MultiImageMixDataset
if cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif cfg['type'] == 'MultiImageMixDataset':
cp_cfg = copy.deepcopy(cfg)
cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
cp_cfg.pop('type')
dataset = MultiImageMixDataset(**cp_cfg)
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = DATASETS.build(cfg, default_args=default_args)
return dataset
def worker_init_fn(worker_id, num_workers, rank, seed):
    # The seed of each worker is
    # num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
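# Hedged usage sketch (hypothetical config, illustrative only): build_dataset can
# wrap a COCO-style dataset config in a ClassBalancedDataset, assuming the
# corresponding dataset class is registered in DATASETS.
# cfg = dict(
#     type='ClassBalancedDataset',
#     oversample_thr=1e-3,
#     dataset=dict(
#         type='CocoDataset',
#         ann_file='annotations/instances_train2017.json',
#         pipeline=[]))
# dataset = build_dataset(cfg)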
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
import numpy as np
import torch
from mmdet.registry import DATASETS, TRANSFORMS
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
PIPELINES = TRANSFORMS
def _concat_dataset(cfg, default_args=None):
from .dataset_wrappers import ConcatDataset
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
separate_eval = cfg.get('separate_eval', True)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
# pop 'separate_eval' since it is not a valid key for common datasets.
if 'separate_eval' in data_cfg:
data_cfg.pop('separate_eval')
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval)
# TODO: Need to refactor later
def build_dataset(cfg, default_args=None):
from mmengine.dataset import ClassBalancedDataset
from .dataset_wrappers import MultiImageMixDataset
if cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif cfg['type'] == 'MultiImageMixDataset':
cp_cfg = copy.deepcopy(cfg)
cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
cp_cfg.pop('type')
dataset = MultiImageMixDataset(**cp_cfg)
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = DATASETS.build(cfg, default_args=default_args)
return dataset
def worker_init_fn(worker_id, num_workers, rank, seed):
    # The seed of each worker is
    # num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
|
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='RandomResize', img_scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# TODO: Use RepeatDataset to speed up training
# training schedule for 3x
train_cfg = dict(max_epochs=36, val_interval=3)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[24, 33],
gamma=0.1)
]
|
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='RandomResize', img_scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# TODO: Use RepeatDataset to speed up training
# training schedule for 3x
train_cfg = dict(by_epoch=True, max_epochs=36)
val_cfg = dict(interval=3)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[24, 33],
gamma=0.1)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
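# Hedged usage sketch (hypothetical config, illustrative only): registered hooks
# like this one are usually enabled through the runner config, e.g.
# custom_hooks = [dict(type='NaiveVisualizationHook', interval=10)]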
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[Tuple[Any, BaseDataElement]]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
inputs, data_samples = data_batch # type: ignore
inputs = tensor2imgs(inputs,
**data_samples[0].get('img_norm_cfg', dict()))
for input, data_sample, output in zip(
inputs,
data_samples, # type: ignore
outputs): # type: ignore
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.writer.add_image(name, origin_image, data_sample,
output, self.draw_gt, self.draw_pred)
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
"Streamer",
"SourceStream",
"SourceAudioStream",
"SourceVideoStream",
"OutputStream",
]
def _init_extension():
import torch
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError(
"Stream API requires FFmpeg libraries (libavformat and such). Please install FFmpeg 4."
) from err
try:
torch.ops.torchaudio.ffmpeg_init()
except RuntimeError as err:
raise RuntimeError(
"Stream API requires FFmpeg binding. Please set BUILD_FFMPEG=1 when building from source."
) from err
global _INITIALIZED
_INITIALIZED = True
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not _INITIALIZED:
_init_extension()
from . import streamer
item = getattr(streamer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
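# Hedged usage note (illustrative only): the module-level __getattr__ above defers
# loading the FFmpeg extension until one of the names in _LAZILY_IMPORTED is first
# accessed, so importing this package by itself does not require FFmpeg.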
|
import torch
import torchaudio
torchaudio._extension._load_lib("libtorchaudio_ffmpeg")
torch.ops.torchaudio.ffmpeg_init()
from .streamer import (
Streamer,
SourceStream,
SourceAudioStream,
SourceVideoStream,
OutputStream,
)
__all__ = [
"Streamer",
"SourceStream",
"SourceAudioStream",
"SourceVideoStream",
"OutputStream",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
third_part_libs = [
'pip install -r ../requirements/albu.txt',
'pip install instaboostfast',
'pip install git+https://github.com/cocodataset/panopticapi.git',
'pip install timm',
'pip install mmpretrain',
'pip install git+https://github.com/lvis-dataset/lvis-api.git',
'pip install -r ../requirements/multimodal.txt',
'pip install -r ../requirements/tracking.txt',
'pip install git+https://github.com/JonathonLuiten/TrackEval.git',
]
default_floating_range = 0.5
model_floating_ranges = {'atss/atss_r50_fpn_1x_coco.py': 0.3}
|
# Copyright (c) OpenMMLab. All rights reserved.
third_part_libs = [
'pip install -r ../requirements/albu.txt',
'pip install instaboostfast',
'pip install git+https://github.com/cocodataset/panopticapi.git',
'pip install timm',
'pip install mmcls>=1.0.0rc0',
'pip install git+https://github.com/lvis-dataset/lvis-api.git',
'pip install -r ../requirements/multimodal.txt',
'pip install -r ../requirements/tracking.txt',
'pip install git+https://github.com/JonathonLuiten/TrackEval.git',
]
default_floating_range = 0.5
model_floating_ranges = {'atss/atss_r50_fpn_1x_coco.py': 0.3}
|
from workflows.context.serializers import (
BaseSerializer, # noqa
JsonSerializer, # noqa
PickleSerializer,
)
# provided for backward compatibility
JsonPickleSerializer = PickleSerializer
|
import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize(self, value: Any) -> str: ...
@abstractmethod
def deserialize(self, value: str) -> Any: ...
class JsonSerializer(BaseSerializer):
def _serialize_value(self, value: Any) -> Any:
"""Helper to serialize a single value."""
if isinstance(value, BaseComponent):
return {
"__is_component": True,
"value": value.to_dict(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, BaseModel):
return {
"__is_pydantic": True,
"value": value.model_dump(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, dict):
return {k: self._serialize_value(v) for k, v in value.items()}
elif isinstance(value, list):
return [self._serialize_value(item) for item in value]
return value
def serialize(self, value: Any) -> str:
try:
serialized_value = self._serialize_value(value)
return json.dumps(serialized_value)
except Exception as e:
raise ValueError(f"Failed to serialize value: {type(value)}: {value!s}")
def _deserialize_value(self, data: Any) -> Any:
"""Helper to deserialize a single value."""
if isinstance(data, dict):
if data.get("__is_pydantic") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.model_validate(data["value"])
elif data.get("__is_component") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.from_dict(data["value"])
return {k: self._deserialize_value(v) for k, v in data.items()}
elif isinstance(data, list):
return [self._deserialize_value(item) for item in data]
return data
def deserialize(self, value: str) -> Any:
data = json.loads(value)
return self._deserialize_value(data)
class PickleSerializer(JsonSerializer):
def serialize(self, value: Any) -> str:
"""Serialize while prioritizing JSON, falling back to Pickle."""
try:
return super().serialize(value)
except Exception:
return base64.b64encode(pickle.dumps(value)).decode("utf-8")
def deserialize(self, value: str) -> Any:
"""
Deserialize while prioritizing Pickle, falling back to JSON.
To avoid malicious exploits of the deserialization, deserialize objects
only when you deem it safe to do so.
"""
try:
return pickle.loads(base64.b64decode(value))
except Exception:
return super().deserialize(value)
JsonPickleSerializer = PickleSerializer
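# Hedged usage sketch (illustrative only, not part of the library): round-trip a
# JSON-friendly payload through JsonSerializer, and rely on PickleSerializer's
# pickle fallback for values that json cannot encode (here a set).
if __name__ == "__main__":
    json_ser = JsonSerializer()
    restored = json_ser.deserialize(json_ser.serialize({"items": [1, 2, 3]}))
    assert restored == {"items": [1, 2, 3]}

    pickle_ser = PickleSerializer()
    roundtrip = pickle_ser.deserialize(pickle_ser.serialize({1, 2, 3}))
    assert roundtrip == {1, 2, 3}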
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
from qdrant_client.http.models.models import Distance
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self, q: 'QdrantArrayType', limit: int = 10, filter: Optional[Dict] = None
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query, in an array type supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
from qdrant_client.http.models.models import Distance
from .... import Document, DocumentArray
from ....math import ndarray
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self, q: 'QdrantArrayType', limit: int = 10, filter: Optional[Dict] = None
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query, in an array type supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
|
import os
from pathlib import Path
from jina import __cache_path__
def generate_default_volume_and_workspace(workspace_id=''):
"""automatically generate a docker volume, and an Executor workspace inside it
:param workspace_id: id that will be part of the fallback workspace path. Default is not adding such an id
:return: List of volumes and a workspace string
"""
default_workspace = os.environ.get('JINA_DEFAULT_WORKSPACE_BASE')
container_addr = '/app'
if default_workspace: # use default workspace provided in env var
host_addr = default_workspace
workspace = os.path.relpath(
path=os.path.abspath(default_workspace), start=Path.home()
)
else: # fallback if no custom volume and no default workspace
workspace = os.path.join(__cache_path__, 'executor-workspace')
host_addr = os.path.join(
Path.home(),
workspace,
workspace_id,
)
workspace_in_container = os.path.join(container_addr, workspace)
generated_volumes = [os.path.abspath(host_addr) + f':{container_addr}']
return generated_volumes, workspace_in_container
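# Hedged usage sketch (illustrative only): the concrete paths depend on the
# JINA_DEFAULT_WORKSPACE_BASE environment variable and the local cache path.
if __name__ == "__main__":
    volumes, container_workspace = generate_default_volume_and_workspace(
        workspace_id='my-executor'
    )
    print(volumes)              # e.g. ['<host workspace path>:/app']
    print(container_workspace)  # workspace path as seen inside the container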
|
import os
from pathlib import Path
def generate_default_volume_and_workspace(workspace_id=''):
"""automatically generate a docker volume, and an Executor workspace inside it
:param workspace_id: id that will be part of the fallback workspace path. Default is not adding such an id
:return: List of volumes and a workspace string
"""
default_workspace = os.environ.get('JINA_DEFAULT_WORKSPACE_BASE')
container_addr = '/app'
if default_workspace: # use default workspace provided in env var
host_addr = default_workspace
workspace = os.path.relpath(
path=os.path.abspath(default_workspace), start=Path.home()
)
else: # fallback if no custom volume and no default workspace
workspace = os.path.join('.jina', 'executor-workspace')
host_addr = os.path.join(
Path.home(),
workspace,
workspace_id,
)
workspace_in_container = os.path.join(container_addr, workspace)
generated_volumes = [os.path.abspath(host_addr) + f':{container_addr}']
return generated_volumes, workspace_in_container
|
"""Vector DB tool spec."""
from typing import List
from llama_index.core.indices.base import BaseIndex
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.vector_stores.types import ExactMatchFilter, MetadataFilters
class VectorDBToolSpec(BaseToolSpec):
"""Vector DB tool spec."""
spec_functions = ["auto_retrieve_fn"]
def __init__(
self,
index: BaseIndex, # TODO typing
) -> None:
"""Initialize with parameters."""
self._index = index
def auto_retrieve_fn(
self,
query: str,
top_k: int,
filter_key_list: List[str],
filter_value_list: List[str],
) -> str:
"""
Auto retrieval function.
Performs auto-retrieval from a vector database, and then applies a set of filters.
Args:
query (str): The query to search
top_k (int): The number of results to retrieve
filter_key_list (List[str]): The list of filter keys
filter_value_list (List[str]): The list of filter values
"""
exact_match_filters = [
ExactMatchFilter(key=k, value=v)
for k, v in zip(filter_key_list, filter_value_list)
]
retriever = VectorIndexRetriever(
self._index,
filters=MetadataFilters(filters=exact_match_filters),
top_k=top_k,
)
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query(query)
return str(response)
# backwards compatibility
VectorDB = VectorDBToolSpec
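# Hedged usage sketch (hypothetical data path and index, illustrative only):
# from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
# documents = SimpleDirectoryReader("./data").load_data()
# index = VectorStoreIndex.from_documents(documents)
# tool_spec = VectorDBToolSpec(index=index)
# print(tool_spec.auto_retrieve_fn(
#     query="recent sales figures",
#     top_k=3,
#     filter_key_list=["year"],
#     filter_value_list=["2023"],
# ))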
|
"""Vector DB tool spec."""
from typing import List
from llama_index.core.indices.base import BaseIndex
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.vector_stores.types import ExactMatchFilter, MetadataFilters
class VectorDBToolSpec(BaseToolSpec):
"""Vector DB tool spec."""
spec_functions = ["auto_retrieve_fn"]
def __init__(
self,
index: BaseIndex, # TODO typing
) -> None:
"""Initialize with parameters."""
self._index = index
def auto_retrieve_fn(
self,
query: str,
top_k: int,
filter_key_list: List[str],
filter_value_list: List[str],
) -> str:
"""Auto retrieval function.
Performs auto-retrieval from a vector database, and then applies a set of filters.
Args:
query (str): The query to search
top_k (int): The number of results to retrieve
filter_key_list (List[str]): The list of filter keys
filter_value_list (List[str]): The list of filter values
"""
exact_match_filters = [
ExactMatchFilter(key=k, value=v)
for k, v in zip(filter_key_list, filter_value_list)
]
retriever = VectorIndexRetriever(
self._index,
filters=MetadataFilters(filters=exact_match_filters),
top_k=top_k,
)
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query(query)
return str(response)
# backwards compatibility
VectorDB = VectorDBToolSpec
|
"""
Computes embeddings
"""
import numpy as np
from sentence_transformers import SentenceTransformer
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
:return:
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
|
"""
Computes embeddings
"""
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import get_device_name
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
:return:
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
device = get_device_name()
if device == "hpu":
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == model.get_max_seq_length()
else:
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
|
_base_ = './centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './centernet_update_r50_fpn_fp16_lsj_200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
    Consider the case of resetting the current ``Registry`` by ``default_scope``
    in an internal module which cannot access the runner directly; it is difficult
to get the ``default_scope`` defined in ``Runner``. However, if ``Runner``
created ``DefaultScope`` instance by given ``default_scope``, the internal
module can get ``default_scope`` by ``DefaultScope.get_current_instance``
everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
        Since ``default_scope`` is an optional argument for ``Registry.build``,
        ``get_current_instance`` should return ``None`` if there is no
        ``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
            Optional[DefaultScope]: Return None if no ``DefaultScope`` instance
                has been created yet; otherwise return the latest created
                DefaultScope instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super().get_current_instance()
else:
instance = None
_release_lock()
return instance
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
    Consider the case of resetting the current ``Registry`` by ``default_scope``
    in an internal module which cannot access the runner directly; it is difficult
to get the ``default_scope`` defined in ``Runner``. However, if ``Runner``
created ``DefaultScope`` instance by given ``default_scope``, the internal
module can get ``default_scope`` by ``DefaultScope.get_current_instance``
everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: Get current scope.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
        Since ``default_scope`` is an optional argument for ``Registry.build``,
        ``get_current_instance`` should return ``None`` if there is no
        ``DefaultScope`` created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # There is no `DefaultScope` created yet,
>>> # `get_current_instance` return `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
            Optional[DefaultScope]: Return None if no ``DefaultScope`` instance
                has been created yet; otherwise return the latest created
                DefaultScope instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super(DefaultScope, cls).get_current_instance()
else:
instance = None
_release_lock()
return instance
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This is a test script that launches as part of the test cases in
run_test.py, to validate the correctness of
the method ``torch.distributed.is_torchelastic_launched()``. To do so,
we run this script with and without torchelastic and validate that the
boolean value written to the out_file is indeed what we expect (e.g.
should be False when not launched with torchelastic, True when launched with it).
The script itself is not a test case, hence no assertions are made in this script.
see: - test/distributed/launcher/run_test.py#test_is_torchelastic_launched()
- test/distributed/launcher/run_test.py#test_is_not_torchelastic_launched()
"""
import argparse
import torch.distributed as dist
def parse_args():
parser = argparse.ArgumentParser(description="test script")
parser.add_argument(
"--out-file",
"--out_file",
help="file to write indicating whether this script was launched with torchelastic",
)
return parser.parse_args()
def main():
args = parse_args()
with open(args.out_file, "w") as out:
out.write(f"{dist.is_torchelastic_launched()}")
if __name__ == "__main__":
main()
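# Hedged usage note (hypothetical command lines, not part of the original script):
#   python <this script> --out-file /tmp/plain.txt                         # expected to write "False"
#   torchrun --nproc_per_node=1 <this script> --out-file /tmp/elastic.txt  # expected to write "True"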
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This is a test script that launches as part of the test cases in
run_test.py, to validate the correctness of
the method ``torch.distributed.is_torchelastic_launched()``. To do so,
we run this script with and without torchelastic and validate that the
boolean value written to the out_file is indeed what we expect (e.g.
should be False when not launched with torchelastic, True when launched with it).
The script itself is not a test case, hence no assertions are made in this script.
see: - test/distributed/launcher/run_test.py#test_is_torchelastic_launched()
- test/distributed/launcher/run_test.py#test_is_not_torchelastic_launched()
"""
import argparse
import torch.distributed as dist
def parse_args():
parser = argparse.ArgumentParser(description="test script")
parser.add_argument(
"--out-file",
"--out_file",
help="file to write indicating whether this script was launched with torchelastic",
)
return parser.parse_args()
def main():
args = parse_args()
with open(args.out_file, "w") as out:
out.write(f"{dist.is_torchelastic_launched()}")
if __name__ == "__main__":
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False,
**kwargs):
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network."""
# TODO: Currently not supported
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test function with test time augmentation.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Tuple
from mmcv.runner import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def forward_train(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs):
"""Forward function during training."""
# TODO: Currently not supported
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
**kwargs):
"""Asynchronized test function."""
raise NotImplementedError
def simple_test(self,
x: Tuple[Tensor],
proposal_list: InstanceList,
batch_img_metas: List[dict],
rescale: bool = False,
**kwargs):
"""Test without augmentation."""
# TODO: Currently not supported
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import pickle
from collections import OrderedDict
import numpy as np
import pytest
import torch
from mmengine import MessageHub
class TestMessageHub:
def test_init(self):
message_hub = MessageHub('name')
assert message_hub.instance_name == 'name'
assert len(message_hub.log_scalars) == 0
assert len(message_hub.log_scalars) == 0
        # The values of log_scalars must be `HistoryBuffer` instances.
with pytest.raises(AssertionError):
MessageHub('hello', log_scalars=OrderedDict(a=1))
# `Resumed_keys`
with pytest.raises(AssertionError):
MessageHub(
'hello',
runtime_info=OrderedDict(iter=1),
resumed_keys=OrderedDict(iters=False))
def test_update_scalar(self):
message_hub = MessageHub.get_instance('mmengine')
# test create target `HistoryBuffer` by name
message_hub.update_scalar('name', 1)
log_buffer = message_hub.log_scalars['name']
assert (log_buffer._log_history == np.array([1])).all()
# test update target `HistoryBuffer` by name
message_hub.update_scalar('name', 1)
assert (log_buffer._log_history == np.array([1, 1])).all()
# unmatched string will raise a key error
def test_update_info(self):
message_hub = MessageHub.get_instance('mmengine')
# test runtime value can be overwritten.
message_hub.update_info('key', 2)
assert message_hub.runtime_info['key'] == 2
message_hub.update_info('key', 1)
assert message_hub.runtime_info['key'] == 1
def test_get_scalar(self):
message_hub = MessageHub.get_instance('mmengine')
# Get undefined key will raise error
with pytest.raises(KeyError):
message_hub.get_scalar('unknown')
# test get log_buffer as wished
log_history = np.array([1, 2, 3, 4, 5])
count = np.array([1, 1, 1, 1, 1])
for i in range(len(log_history)):
message_hub.update_scalar('test_value', float(log_history[i]),
int(count[i]))
recorded_history, recorded_count = \
message_hub.get_scalar('test_value').data
assert (log_history == recorded_history).all()
assert (recorded_count == count).all()
def test_get_runtime(self):
message_hub = MessageHub.get_instance('mmengine')
with pytest.raises(KeyError):
message_hub.get_info('unknown')
recorded_dict = dict(a=1, b=2)
message_hub.update_info('test_value', recorded_dict)
assert message_hub.get_info('test_value') == recorded_dict
def test_get_scalars(self):
message_hub = MessageHub.get_instance('mmengine')
log_dict = dict(
loss=1,
loss_cls=torch.tensor(2),
loss_bbox=np.array(3),
loss_iou=dict(value=1, count=2))
message_hub.update_scalars(log_dict)
loss = message_hub.get_scalar('loss')
loss_cls = message_hub.get_scalar('loss_cls')
loss_bbox = message_hub.get_scalar('loss_bbox')
loss_iou = message_hub.get_scalar('loss_iou')
assert loss.current() == 1
assert loss_cls.current() == 2
assert loss_bbox.current() == 3
assert loss_iou.mean() == 0.5
with pytest.raises(AssertionError):
loss_dict = dict(error_type=[])
message_hub.update_scalars(loss_dict)
with pytest.raises(AssertionError):
loss_dict = dict(error_type=dict(count=1))
message_hub.update_scalars(loss_dict)
def test_getstate(self):
message_hub = MessageHub.get_instance('name')
# update log_scalars.
message_hub.update_scalar('loss', 0.1)
message_hub.update_scalar('lr', 0.1, resumed=False)
# update runtime information
message_hub.update_info('iter', 1, resumed=True)
message_hub.update_info('tensor', [1, 2, 3], resumed=False)
obj = pickle.dumps(message_hub)
instance = pickle.loads(obj)
with pytest.raises(KeyError):
instance.get_info('feat')
with pytest.raises(KeyError):
instance.get_info('lr')
instance.get_info('iter')
instance.get_scalar('loss')
def test_get_instance(self):
# Test get root mmengine message hub.
MessageHub._instance_dict = OrderedDict()
root_logger = MessageHub.get_current_instance()
assert id(MessageHub.get_instance('mmengine')) == id(root_logger)
# Test original `get_current_instance` function.
MessageHub.get_instance('mmdet')
assert MessageHub.get_current_instance().instance_name == 'mmdet'
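# Hedged usage sketch (not part of the original test suite): the calls below use
# the same MessageHub APIs exercised by the tests above; the instance name 'demo'
# is an arbitrary, illustrative choice.
if __name__ == '__main__':
    hub = MessageHub.get_instance('demo')
    hub.update_scalar('loss', 0.25)  # creates a HistoryBuffer named 'loss'
    hub.update_scalar('loss', 0.15)  # appends to the same buffer
    print(hub.get_scalar('loss').mean())  # running mean over the buffer
    hub.update_info('epoch', 3)  # arbitrary runtime information
    print(hub.get_info('epoch'))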
|
# Copyright (c) OpenMMLab. All rights reserved.
import pickle
from collections import OrderedDict
import numpy as np
import pytest
import torch
from mmengine import MessageHub
class TestMessageHub:
def test_init(self):
message_hub = MessageHub('name')
assert message_hub.instance_name == 'name'
assert len(message_hub.log_scalars) == 0
assert len(message_hub.runtime_info) == 0
# The type of log_scalars's value must be `HistoryBuffer`.
with pytest.raises(AssertionError):
MessageHub('hello', log_scalars=OrderedDict(a=1))
# `Resumed_keys`
with pytest.raises(AssertionError):
MessageHub(
'hello',
runtime_info=OrderedDict(iter=1),
resumed_keys=OrderedDict(iters=False))
def test_update_scalar(self):
message_hub = MessageHub.get_instance('mmengine')
# test create target `HistoryBuffer` by name
message_hub.update_scalar('name', 1)
log_buffer = message_hub.log_scalars['name']
assert (log_buffer._log_history == np.array([1])).all()
# test update target `HistoryBuffer` by name
message_hub.update_scalar('name', 1)
assert (log_buffer._log_history == np.array([1, 1])).all()
# unmatched string will raise a key error
def test_update_info(self):
message_hub = MessageHub.get_instance('mmengine')
# test runtime value can be overwritten.
message_hub.update_info('key', 2)
assert message_hub.runtime_info['key'] == 2
message_hub.update_info('key', 1)
assert message_hub.runtime_info['key'] == 1
def test_get_scalar(self):
message_hub = MessageHub.get_instance('mmengine')
# Get undefined key will raise error
with pytest.raises(KeyError):
message_hub.get_scalar('unknown')
# test get log_buffer as wished
log_history = np.array([1, 2, 3, 4, 5])
count = np.array([1, 1, 1, 1, 1])
for i in range(len(log_history)):
message_hub.update_scalar('test_value', float(log_history[i]),
int(count[i]))
recorded_history, recorded_count = \
message_hub.get_scalar('test_value').data
assert (log_history == recorded_history).all()
assert (recorded_count == count).all()
def test_get_runtime(self):
message_hub = MessageHub.get_instance('mmengine')
with pytest.raises(KeyError):
message_hub.get_info('unknown')
recorded_dict = dict(a=1, b=2)
message_hub.update_info('test_value', recorded_dict)
assert message_hub.get_info('test_value') == recorded_dict
def test_get_scalars(self):
message_hub = MessageHub.get_instance('mmengine')
log_dict = dict(
loss=1,
loss_cls=torch.tensor(2),
loss_bbox=np.array(3),
loss_iou=dict(value=1, count=2))
message_hub.update_scalars(log_dict)
loss = message_hub.get_scalar('loss')
loss_cls = message_hub.get_scalar('loss_cls')
loss_bbox = message_hub.get_scalar('loss_bbox')
loss_iou = message_hub.get_scalar('loss_iou')
assert loss.current() == 1
assert loss_cls.current() == 2
assert loss_bbox.current() == 3
assert loss_iou.mean() == 0.5
with pytest.raises(AssertionError):
loss_dict = dict(error_type=[])
message_hub.update_scalars(loss_dict)
with pytest.raises(AssertionError):
loss_dict = dict(error_type=dict(count=1))
message_hub.update_scalars(loss_dict)
def test_getstate(self):
message_hub = MessageHub.get_instance('name')
# update log_scalars.
message_hub.update_scalar('loss', 0.1)
message_hub.update_scalar('lr', 0.1, resumed=False)
# update runtime information
message_hub.update_info('iter', 1, resumed=True)
message_hub.update_info('feat', [1, 2, 3], resumed=False)
obj = pickle.dumps(message_hub)
instance = pickle.loads(obj)
with pytest.raises(KeyError):
instance.get_info('feat')
with pytest.raises(KeyError):
instance.get_info('lr')
instance.get_info('iter')
instance.get_scalar('loss')
def test_get_instance(self):
# Test get root mmengine message hub.
MessageHub._instance_dict = OrderedDict()
root_logger = MessageHub.get_current_instance()
assert id(MessageHub.get_instance('mmengine')) == id(root_logger)
# Test original `get_current_instance` function.
MessageHub.get_instance('mmdet')
assert MessageHub.get_current_instance().instance_name == 'mmdet'
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class FluxTransformer2DModelSingleFileTests(unittest.TestCase):
model_class = FluxTransformer2DModel
ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"
alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"]
repo_id = "black-forest-labs/FLUX.1-dev"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
torch.cuda.empty_cache()
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
torch.cuda.empty_cache()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class FluxTransformer2DModelSingleFileTests(unittest.TestCase):
model_class = FluxTransformer2DModel
ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"
alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"]
repo_id = "black-forest-labs/FLUX.1-dev"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
torch.cuda.empty_cache()
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
torch.cuda.empty_cache()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.azure_cognitive_services import (
AzureCognitiveServicesToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AzureCognitiveServicesToolkit": (
"langchain_community.agent_toolkits.azure_cognitive_services"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AzureCognitiveServicesToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.azure_cognitive_services import (
AzureCognitiveServicesToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AzureCognitiveServicesToolkit": (
"langchain_community.agent_toolkits.azure_cognitive_services"
)
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AzureCognitiveServicesToolkit",
]
|
__version__ = '0.39.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.39.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from langchain_core.documents import (
Document, # type: ignore[import-not-found]
)
from langchain_exa import ExaSearchRetriever
def test_exa_retriever() -> None:
retriever = ExaSearchRetriever()
res = retriever.invoke("best time to visit japan")
print(res) # noqa: T201
assert len(res) == 10 # default k
assert isinstance(res, list)
assert isinstance(res[0], Document)
def test_exa_retriever_highlights() -> None:
retriever = ExaSearchRetriever(highlights=True)
res = retriever.invoke("best time to visit japan")
print(res) # noqa: T201
assert isinstance(res, list)
assert isinstance(res[0], Document)
highlights = res[0].metadata["highlights"]
highlight_scores = res[0].metadata["highlight_scores"]
assert isinstance(highlights, list)
assert isinstance(highlight_scores, list)
assert isinstance(highlights[0], str)
assert isinstance(highlight_scores[0], float)
def test_exa_retriever_advanced_features() -> None:
retriever = ExaSearchRetriever(
k=3, text_contents_options={"max_characters": 1000}, summary=True, type="auto"
)
res = retriever.invoke("best time to visit japan")
print(res) # noqa: T201
assert len(res) == 3 # requested k=3
assert isinstance(res, list)
assert isinstance(res[0], Document)
# Verify summary is in metadata
assert "summary" in res[0].metadata
assert isinstance(res[0].metadata["summary"], str)
# Verify text was limited
assert len(res[0].page_content) <= 1000
|
from langchain_core.documents import (
Document, # type: ignore[import-not-found]
)
from langchain_exa import ExaSearchRetriever
def test_exa_retriever() -> None:
retriever = ExaSearchRetriever()
res = retriever.invoke("best time to visit japan")
print(res) # noqa: T201
assert len(res) == 10 # default k
assert isinstance(res, list)
assert isinstance(res[0], Document)
def test_exa_retriever_highlights() -> None:
retriever = ExaSearchRetriever(highlights=True)
res = retriever.invoke("best time to visit japan")
print(res) # noqa: T201
assert isinstance(res, list)
assert isinstance(res[0], Document)
highlights = res[0].metadata["highlights"]
highlight_scores = res[0].metadata["highlight_scores"]
assert isinstance(highlights, list)
assert isinstance(highlight_scores, list)
assert isinstance(highlights[0], str)
assert isinstance(highlight_scores[0], float)
|
import gc
import tempfile
import unittest
import torch
from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline
from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import (
SDSingleFileTesterMixin,
download_original_config,
download_single_file_checkpoint,
)
enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionPipeline
ckpt_path = (
"https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
)
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"generator": generator,
"num_inference_steps": 2,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
def test_single_file_legacy_scheduler_loading(self):
with tempfile.TemporaryDirectory() as tmpdir:
repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path)
local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir)
local_original_config = download_original_config(self.original_config, tmpdir)
pipe = self.pipeline_class.from_single_file(
local_ckpt_path,
original_config=local_original_config,
cache_dir=tmpdir,
local_files_only=True,
scheduler_type="euler",
)
# Default is PNDM for this checkpoint
assert isinstance(pipe.scheduler, EulerDiscreteScheduler)
def test_single_file_legacy_scaling_factor(self):
new_scaling_factor = 10.0
init_pipe = self.pipeline_class.from_single_file(self.ckpt_path)
pipe = self.pipeline_class.from_single_file(self.ckpt_path, scaling_factor=new_scaling_factor)
assert init_pipe.vae.config.scaling_factor != new_scaling_factor
assert pipe.vae.config.scaling_factor == new_scaling_factor
@slow
class StableDiffusion21PipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
repo_id = "stabilityai/stable-diffusion-2-1"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"generator": generator,
"num_inference_steps": 2,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
|
import gc
import tempfile
import unittest
import torch
from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline
from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
)
from .single_file_testing_utils import (
SDSingleFileTesterMixin,
download_original_config,
download_single_file_checkpoint,
)
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionPipeline
ckpt_path = (
"https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
)
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"generator": generator,
"num_inference_steps": 2,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
def test_single_file_legacy_scheduler_loading(self):
with tempfile.TemporaryDirectory() as tmpdir:
repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path)
local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir)
local_original_config = download_original_config(self.original_config, tmpdir)
pipe = self.pipeline_class.from_single_file(
local_ckpt_path,
original_config=local_original_config,
cache_dir=tmpdir,
local_files_only=True,
scheduler_type="euler",
)
# Default is PNDM for this checkpoint
assert isinstance(pipe.scheduler, EulerDiscreteScheduler)
def test_single_file_legacy_scaling_factor(self):
new_scaling_factor = 10.0
init_pipe = self.pipeline_class.from_single_file(self.ckpt_path)
pipe = self.pipeline_class.from_single_file(self.ckpt_path, scaling_factor=new_scaling_factor)
assert init_pipe.vae.config.scaling_factor != new_scaling_factor
assert pipe.vae.config.scaling_factor == new_scaling_factor
@slow
class StableDiffusion21PipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
repo_id = "stabilityai/stable-diffusion-2-1"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"generator": generator,
"num_inference_steps": 2,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from ..utils import deprecate, logging
from .controlnets.controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class FluxControlNetOutput(FluxControlNetOutput):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxControlNetOutput` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxControlNetOutput`, instead."
deprecate("diffusers.models.controlnet_flux.FluxControlNetOutput", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
class FluxControlNetModel(FluxControlNetModel):
def __init__(
self,
patch_size: int = 1,
in_channels: int = 64,
num_layers: int = 19,
num_single_layers: int = 38,
attention_head_dim: int = 128,
num_attention_heads: int = 24,
joint_attention_dim: int = 4096,
pooled_projection_dim: int = 768,
guidance_embeds: bool = False,
axes_dims_rope: List[int] = [16, 56, 56],
num_mode: int = None,
conditioning_embedding_channels: int = None,
):
deprecation_message = "Importing `FluxControlNetModel` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxControlNetModel`, instead."
deprecate("diffusers.models.controlnet_flux.FluxControlNetModel", "0.34", deprecation_message)
super().__init__(
patch_size=patch_size,
in_channels=in_channels,
num_layers=num_layers,
num_single_layers=num_single_layers,
attention_head_dim=attention_head_dim,
num_attention_heads=num_attention_heads,
joint_attention_dim=joint_attention_dim,
pooled_projection_dim=pooled_projection_dim,
guidance_embeds=guidance_embeds,
axes_dims_rope=axes_dims_rope,
num_mode=num_mode,
conditioning_embedding_channels=conditioning_embedding_channels,
)
class FluxMultiControlNetModel(FluxMultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxMultiControlNetModel` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxMultiControlNetModel`, instead."
deprecate("diffusers.models.controlnet_flux.FluxMultiControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
|
# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import deprecate, logging
from .controlnets.controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class FluxControlNetOutput(FluxControlNetOutput):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxControlNetOutput` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxControlNetOutput`, instead."
deprecate("FluxControlNetOutput", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
class FluxControlNetModel(FluxControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxControlNetModel` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxControlNetModel`, instead."
deprecate("FluxControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
class FluxMultiControlNetModel(FluxMultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `FluxMultiControlNetModel` from `diffusers.models.controlnet_flux` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_flux import FluxMultiControlNetModel`, instead."
deprecate("FluxMultiControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
|
"""Test for CombinedMemory class"""
# from langchain_core.prompts import PromptTemplate
import pytest
from langchain.memory import CombinedMemory, ConversationBufferMemory
@pytest.fixture()
def example_memory() -> list[ConversationBufferMemory]:
example_1 = ConversationBufferMemory(memory_key="foo")
example_2 = ConversationBufferMemory(memory_key="bar")
example_3 = ConversationBufferMemory(memory_key="bar")
return [example_1, example_2, example_3]
def test_basic_functionality(example_memory: list[ConversationBufferMemory]) -> None:
"""Test basic functionality of methods exposed by class"""
combined_memory = CombinedMemory(memories=[example_memory[0], example_memory[1]])
assert combined_memory.memory_variables == ["foo", "bar"]
assert combined_memory.load_memory_variables({}) == {"foo": "", "bar": ""}
combined_memory.save_context(
{"input": "Hello there"}, {"output": "Hello, how can I help you?"}
)
assert combined_memory.load_memory_variables({}) == {
"foo": "Human: Hello there\nAI: Hello, how can I help you?",
"bar": "Human: Hello there\nAI: Hello, how can I help you?",
}
combined_memory.clear()
assert combined_memory.load_memory_variables({}) == {"foo": "", "bar": ""}
def test_repeated_memory_var(example_memory: list[ConversationBufferMemory]) -> None:
"""Test raising error when repeated memory variables found"""
with pytest.raises(ValueError):
CombinedMemory(memories=[example_memory[1], example_memory[2]])
|
"""Test for CombinedMemory class"""
# from langchain_core.prompts import PromptTemplate
from typing import List
import pytest
from langchain.memory import CombinedMemory, ConversationBufferMemory
@pytest.fixture()
def example_memory() -> List[ConversationBufferMemory]:
example_1 = ConversationBufferMemory(memory_key="foo")
example_2 = ConversationBufferMemory(memory_key="bar")
example_3 = ConversationBufferMemory(memory_key="bar")
return [example_1, example_2, example_3]
def test_basic_functionality(example_memory: List[ConversationBufferMemory]) -> None:
"""Test basic functionality of methods exposed by class"""
combined_memory = CombinedMemory(memories=[example_memory[0], example_memory[1]])
assert combined_memory.memory_variables == ["foo", "bar"]
assert combined_memory.load_memory_variables({}) == {"foo": "", "bar": ""}
combined_memory.save_context(
{"input": "Hello there"}, {"output": "Hello, how can I help you?"}
)
assert combined_memory.load_memory_variables({}) == {
"foo": "Human: Hello there\nAI: Hello, how can I help you?",
"bar": "Human: Hello there\nAI: Hello, how can I help you?",
}
combined_memory.clear()
assert combined_memory.load_memory_variables({}) == {"foo": "", "bar": ""}
def test_repeated_memory_var(example_memory: List[ConversationBufferMemory]) -> None:
"""Test raising error when repeated memory variables found"""
with pytest.raises(ValueError):
CombinedMemory(memories=[example_memory[1], example_memory[2]])
|
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:return: blob bytes.
"""
if urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as fp:
return fp.read()
elif os.path.exists(uri):
with open(uri, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'`{uri}` is not a URL or a valid local path')
def _get_file_context(file):
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'wb')
return file_ctx
def _to_datauri(
mimetype, data, charset: str = 'utf-8', base64: bool = False, binary: bool = True
) -> str:
"""
Convert data to data URI.
:param mimetype: MIME types (e.g. 'text/plain','image/png' etc.)
:param data: Data representations.
:param charset: Charset may be any character set registered with IANA
:param base64: Used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit. Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
:param binary: True if from binary data False for other data (e.g. text)
:return: URI data
"""
parts = ['data:', mimetype]
if charset is not None:
parts.extend([';charset=', charset])
if base64:
parts.append(';base64')
from base64 import encodebytes as encode64
if binary:
encoded_data = encode64(data).decode(charset).replace('\n', '').strip()
else:
encoded_data = encode64(data).strip()
else:
from urllib.parse import quote_from_bytes, quote
if binary:
encoded_data = quote_from_bytes(data)
else:
encoded_data = quote(data)
parts.extend([',', encoded_data])
return ''.join(parts)
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'})
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return scheme in {'data'}
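# Worked example (illustrative sketch, not part of the original module): encoding a
# small UTF-8 payload with base64 should yield a standard data URI.
if __name__ == '__main__':
    uri = _to_datauri('text/plain', b'hello', base64=True)
    print(uri)  # expected: data:text/plain;charset=utf-8;base64,aGVsbG8=
    print(_is_datauri(uri))  # expected: True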
|
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
from ...helper import __windows__
def _uri_to_blob(uri: str) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:return: blob bytes.
"""
if urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as fp:
return fp.read()
elif os.path.exists(uri):
with open(uri, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'`{uri}` is not a URL or a valid local path')
def _get_file_context(file):
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
if __windows__:
file_ctx = open(file, 'wb', newline='')
else:
file_ctx = open(file, 'wb')
return file_ctx
def _to_datauri(
mimetype, data, charset: str = 'utf-8', base64: bool = False, binary: bool = True
) -> str:
"""
Convert data to data URI.
:param mimetype: MIME types (e.g. 'text/plain','image/png' etc.)
:param data: Data representations.
:param charset: Charset may be any character set registered with IANA
:param base64: Used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit. Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
:param binary: True if from binary data False for other data (e.g. text)
:return: URI data
"""
parts = ['data:', mimetype]
if charset is not None:
parts.extend([';charset=', charset])
if base64:
parts.append(';base64')
from base64 import encodebytes as encode64
if binary:
encoded_data = encode64(data).decode(charset).replace('\n', '').strip()
else:
encoded_data = encode64(data).strip()
else:
from urllib.parse import quote_from_bytes, quote
if binary:
encoded_data = quote_from_bytes(data)
else:
encoded_data = quote(data)
parts.extend([',', encoded_data])
return ''.join(parts)
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'})
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return scheme in {'data'}
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBox
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_box = BoundingBox(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_box": bounding_box,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
# The order of the image files in the .zip archives perfectly matches the order of the entries in the
# (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datapoints import BoundingBox, Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_box = BoundingBox(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_box": bounding_box,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
# The order of the image files in the .zip archives perfectly matches the order of the entries in the
# (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_swin2sr import *
from .image_processing_swin2sr import *
from .image_processing_swin2sr_fast import *
from .modeling_swin2sr import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_swin2sr import *
from .image_processing_swin2sr import *
from .modeling_swin2sr import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from pathlib import Path
import pytest
from jina import Flow
@pytest.mark.parametrize('_type', ['wav', 'mp3', 'blob'])
def test_chunks_exist(build_da, _type):
da = build_da(_type)
with Flow.load_config(str(Path(__file__).parent / 'flow.yml')) as f:
responses = f.post(on='segment', inputs=da, return_results=True)
locations = [
[0, 56500],
[69500, 92000],
[94500, 213000],
[223500, 270500],
]
assert len(responses[0].docs) == 1
for doc in responses[0].docs:
assert len(doc.chunks) == 4
for chunk, location in zip(doc.chunks, locations):
assert chunk.location == location
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from pathlib import Path
import pytest
from jina import Flow, Document, DocumentArray
@pytest.mark.parametrize('_type', ['wav', 'mp3', 'blob'])
def test_chunks_exist(build_da, _type):
da = build_da(_type)
with Flow.load_config(str(Path(__file__).parent / 'flow.yml')) as f:
responses = f.post(on='segment', inputs=da, return_results=True)
locations = [
[0, 56500],
[69500, 92000],
[94500, 213000],
[223500, 270500],
]
assert len(responses[0].docs) == 1
for doc in responses[0].docs:
assert len(doc.chunks) == 4
for chunk, location in zip(doc.chunks, locations):
assert chunk.location == location
|
"""Code Interpreter tool spec."""
import subprocess
import sys
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class CodeInterpreterToolSpec(BaseToolSpec):
"""Code Interpreter tool spec.
WARNING: This tool provides the Agent access to the `subprocess.run` command.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended to be used in a production setting, and would require heavy sandboxing or virtual machines
"""
spec_functions = ["code_interpreter"]
def code_interpreter(self, code: str):
"""
A function to execute python code, and return the stdout and stderr.
You should import any libraries that you wish to use. You have access to any libraries the user has installed.
The code passed to this function is executed in isolation. It should be complete at the time it is passed to this function.
You should interpret the output and errors returned from this function, and attempt to fix any problems.
If you cannot fix the error, show the code to the user and ask for help
It is not possible to return graphics or other complicated data from this function. If the user cannot see the output, save it to a file and tell the user.
"""
result = subprocess.run([sys.executable, "-c", code], capture_output=True)
return f"StdOut:\n{result.stdout}\nStdErr:\n{result.stderr}"
|
"""Code Interpreter tool spec."""
import subprocess
import sys
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class CodeInterpreterToolSpec(BaseToolSpec):
"""Code Interpreter tool spec.
WARNING: This tool provides the Agent access to the `subprocess.run` command.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended to be used in a production setting, and would require heavy sandboxing or virtual machines
"""
spec_functions = ["code_interpreter"]
def code_interpreter(self, code: str):
"""
A function to execute python code, and return the stdout and stderr.
You should import any libraries that you wish to use. You have access to any libraries the user has installed.
The code passed to this function is executed in isolation. It should be complete at the time it is passed to this function.
You should interpret the output and errors returned from this function, and attempt to fix any problems.
If you cannot fix the error, show the code to the user and ask for help
It is not possible to return graphics or other complicated data from this function. If the user cannot see the output, save it to a file and tell the user.
"""
result = subprocess.run([sys.executable, "-c", code], capture_output=True)
return f"StdOut:\n{result.stdout}\nStdErr:\n{result.stderr}"
|
from typing import Optional
import torch
from docarray import BaseDoc, DocList
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(BaseDoc):
text: str
tensor: Optional[TorchTensor[3, 224, 224]] = None
N = 10
batch = DocList[Mmdoc](Mmdoc(text=f'hello{i}') for i in range(N))
batch.tensor = torch.zeros(N, 3, 224, 224)
batch = batch.to_doc_vec()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model = Model()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for _ in range(2):
loss = model(batch.tensor).sum()
loss.backward()
opt.step()
|
from typing import Optional
import torch
from docarray import BaseDoc, DocList
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(BaseDoc):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
batch = DocList[Mmdoc](Mmdoc(text=f'hello{i}') for i in range(N))
batch.tensor = torch.zeros(N, 3, 224, 224)
batch = batch.to_doc_vec()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model = Model()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for _ in range(2):
loss = model(batch.tensor).sum()
loss.backward()
opt.step()
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.6"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.5"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
from __future__ import annotations
from collections import Counter
import pytest
from sentence_transformers.sampler import GroupByLabelBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ..., 99],
"label_a": [0, 1, 0, 1, ..., 0, 1],
"label_b": [0, 1, 2, 3, 4, 0, ..., 4]
}
"""
data = {"data": list(range(100)), "label_a": [i % 2 for i in range(100)], "label_b": [i % 5 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_uneven_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": ["a"] * 51,
"label": [0] * 17 + [1] * 17 + [2] * 17,
}
"""
data = {"data": ["a"] * 51, "label": [0] * 17 + [1] * 17 + [2] * 17}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label_a", "label_b"]
)
batches = list(iter(sampler))
assert all(len(batch) == batch_size for batch in batches)
# Check if all labels within each batch are identical
# In this case, label_a has 50 0's and 50 1's, so with a batch size of 10 we expect each batch to
# have only 0's or only 1's.
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_a"] for idx in batch]
assert len(set(labels)) == 1, f"Batch {batch} does not have identical labels: {labels}"
def test_group_by_label_batch_sampler_label_b(dummy_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label_b"]
)
# drop_last=True, so each batch should be the same length and the last batch is dropped.
batches = list(iter(sampler))
assert all(
len(batch) == batch_size for batch in batches
), "Not all batches are the same size, while drop_last was True."
# Assert that we have the expected number of total samples in the batches.
assert sum(len(batch) for batch in batches) == 100 // batch_size * batch_size
# Since we have 20 occurrences each of label_b values 0, 1, 2, 3 and 4 and a batch_size of 8, we expect each batch
# to have either 4 or 8 samples with the same label. (The first two batches are 16 samples of the first label,
    # leaving 4 for the third batch. Then 4 of the next label are added, leaving 16 for the next two batches, and so on.)
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_b"] for idx in batch]
counts = list(Counter(labels).values())
assert counts == [8] or counts == [4, 4]
def test_group_by_label_batch_sampler_uneven_dataset(dummy_uneven_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_uneven_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label"]
)
    # With a batch_size of 8 and 17 samples per label, verify that every label in a batch occurs at least twice.
# We accept some tiny data loss (1 sample per label) due to the uneven number of samples per label.
batches = list(iter(sampler))
for batch in batches:
labels = [dummy_uneven_dataset[int(idx)]["label"] for idx in batch]
counts = list(Counter(labels).values())
        assert all(count > 1 for count in counts)
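# Illustrative sketch (an assumption, not part of the original tests): because a batch sampler of
# this kind yields lists of dataset indices, it can drive a torch DataLoader directly, e.g.:
#
#     from torch.utils.data import DataLoader
#     loader = DataLoader(dummy_dataset, batch_sampler=sampler)
#     for batch in loader:
#         ...  # each batch groups samples that share a label value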
|
from __future__ import annotations
from collections import Counter
import pytest
from datasets import Dataset
from sentence_transformers.sampler import GroupByLabelBatchSampler
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ..., 99],
"label_a": [0, 1, 0, 1, ..., 0, 1],
"label_b": [0, 1, 2, 3, 4, 0, ..., 4]
}
"""
data = {"data": list(range(100)), "label_a": [i % 2 for i in range(100)], "label_b": [i % 5 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_uneven_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": ["a"] * 51,
"label": [0] * 17 + [1] * 17 + [2] * 17,
}
"""
data = {"data": ["a"] * 51, "label": [0] * 17 + [1] * 17 + [2] * 17}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label_a", "label_b"]
)
batches = list(iter(sampler))
assert all(len(batch) == batch_size for batch in batches)
# Check if all labels within each batch are identical
# In this case, label_a has 50 0's and 50 1's, so with a batch size of 10 we expect each batch to
# have only 0's or only 1's.
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_a"] for idx in batch]
assert len(set(labels)) == 1, f"Batch {batch} does not have identical labels: {labels}"
def test_group_by_label_batch_sampler_label_b(dummy_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label_b"]
)
# drop_last=True, so each batch should be the same length and the last batch is dropped.
batches = list(iter(sampler))
assert all(
len(batch) == batch_size for batch in batches
), "Not all batches are the same size, while drop_last was True."
# Assert that we have the expected number of total samples in the batches.
assert sum(len(batch) for batch in batches) == 100 // batch_size * batch_size
# Since we have 20 occurrences each of label_b values 0, 1, 2, 3 and 4 and a batch_size of 8, we expect each batch
# to have either 4 or 8 samples with the same label. (The first two batches are 16 samples of the first label,
    # leaving 4 for the third batch. Then 4 of the next label are added, leaving 16 for the next two batches, and so on.)
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_b"] for idx in batch]
counts = list(Counter(labels).values())
assert counts == [8] or counts == [4, 4]
def test_group_by_label_batch_sampler_uneven_dataset(dummy_uneven_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_uneven_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label"]
)
    # With a batch_size of 8 and 17 samples per label, verify that every label in a batch occurs at least twice.
# We accept some tiny data loss (1 sample per label) due to the uneven number of samples per label.
batches = list(iter(sampler))
for batch in batches:
labels = [dummy_uneven_dataset[int(idx)]["label"] for idx in batch]
counts = list(Counter(labels).values())
        assert all(count > 1 for count in counts)
|
"""Retriever OpenAI agent."""
from typing import Any, cast
from llama_index.agent.openai_legacy.openai_agent import (
OpenAIAgent,
)
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.tools.types import BaseTool
class FnRetrieverOpenAIAgent(OpenAIAgent):
"""
Function Retriever OpenAI Agent.
    Uses our object retriever module to retrieve tools for the OpenAI agent.
    NOTE: This is deprecated; you can just use the base `OpenAIAgent` class by
specifying the following:
```
agent = OpenAIAgent.from_tools(tool_retriever=retriever, ...)
```
"""
@classmethod
def from_retriever(
cls, retriever: ObjectRetriever[BaseTool], **kwargs: Any
) -> "FnRetrieverOpenAIAgent":
return cast(
FnRetrieverOpenAIAgent, cls.from_tools(tool_retriever=retriever, **kwargs)
)
|
"""Retriever OpenAI agent."""
from typing import Any, cast
from llama_index.agent.openai_legacy.openai_agent import (
OpenAIAgent,
)
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.tools.types import BaseTool
class FnRetrieverOpenAIAgent(OpenAIAgent):
"""Function Retriever OpenAI Agent.
    Uses our object retriever module to retrieve tools for the OpenAI agent.
    NOTE: This is deprecated; you can just use the base `OpenAIAgent` class by
specifying the following:
```
agent = OpenAIAgent.from_tools(tool_retriever=retriever, ...)
```
"""
@classmethod
def from_retriever(
cls, retriever: ObjectRetriever[BaseTool], **kwargs: Any
) -> "FnRetrieverOpenAIAgent":
return cast(
FnRetrieverOpenAIAgent, cls.from_tools(tool_retriever=retriever, **kwargs)
)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
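# Illustrative note (an assumption about MMEngine's linear scaling rule, not stated in this config):
# with `enable=True`, the lr above would be rescaled by actual_batch / base_batch_size, e.g.
#   actual batch = 4 GPUs x 2 imgs/GPU = 8  ->  scaled lr = 0.02 * 8 / 16 = 0.01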
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
import json
from contextlib import nullcontext
from typing import Union, TextIO, TYPE_CHECKING, Type, List
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class JsonIOMixin:
"""Save/load a array into a JSON file."""
def save_json(
self,
file: Union[str, TextIO],
protocol: str = 'jsonschema',
encoding: str = 'utf-8',
**kwargs
) -> None:
"""Save array elements into a JSON file.
        Compared to :meth:`save_binary`, it is human-readable but slower to save/load, and the file size is larger.
:param file: File or filename to which the data is saved.
:param protocol: `jsonschema` or `protobuf`
:param encoding: encoding used to save data into a JSON file. By default, ``utf-8`` is used.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx as fp:
fp.write(self.to_json(protocol=protocol, **kwargs))
@classmethod
def load_json(
cls: Type['T'],
file: Union[str, TextIO],
protocol: str = 'jsonschema',
encoding: str = 'utf-8',
**kwargs
) -> 'T':
"""Load array elements from a JSON file.
        :param file: File or filename or a JSON string from which the data is loaded.
:param protocol: `jsonschema` or `protobuf`
:param encoding: encoding used to load data from a JSON file. By default, ``utf-8`` is used.
:return: a DocumentArrayLike object
"""
if hasattr(file, 'read'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'r', encoding=encoding)
with file_ctx as fp:
return cls.from_json(fp.read(), protocol=protocol, **kwargs)
@classmethod
def from_json(
cls: Type['T'],
file: Union[str, bytes, bytearray],
protocol: str = 'jsonschema',
**kwargs
) -> 'T':
from docarray import Document
json_docs = json.loads(file)
return cls(
[Document.from_dict(v, protocol=protocol) for v in json_docs], **kwargs
)
@classmethod
def from_list(
cls: Type['T'], values: List, protocol: str = 'jsonschema', **kwargs
) -> 'T':
from docarray import Document
return cls(Document.from_dict(v, protocol=protocol, **kwargs) for v in values)
def to_list(self, protocol: str = 'jsonschema', **kwargs) -> List:
"""Convert the object into a Python list.
:param protocol: `jsonschema` or `protobuf`
:return: a Python list
"""
return [d.to_dict(protocol=protocol, **kwargs) for d in self]
def to_json(self, protocol: str = 'jsonschema', **kwargs) -> str:
"""Convert the object into a JSON string. Can be loaded via :meth:`.load_json`.
:param protocol: `jsonschema` or `protobuf`
        :return: a JSON string
"""
return json.dumps(self.to_list(protocol=protocol, **kwargs))
# to comply with Document interfaces but less semantically accurate
to_dict = to_list
from_dict = from_list
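# Illustrative sketch (an assumption, not part of the original module): round-tripping an array
# that mixes in JsonIOMixin through JSON, using docarray's v1-style API:
#
#     from docarray import Document, DocumentArray
#     da = DocumentArray([Document(text='hello'), Document(text='world')])
#     da.save_json('docs.json')                        # human-readable, `jsonschema` protocol
#     restored = DocumentArray.load_json('docs.json')
#     assert len(restored) == len(da)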
|
import json
from contextlib import nullcontext
from typing import Union, TextIO, TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.typing import T
class JsonIOMixin:
"""Save/load a array into a JSON file."""
def save_json(
self,
file: Union[str, TextIO],
protocol: str = 'jsonschema',
encoding: str = 'utf-8',
**kwargs
) -> None:
"""Save array elements into a JSON file.
        Compared to :meth:`save_binary`, it is human-readable but slower to save/load, and the file size is larger.
:param file: File or filename to which the data is saved.
:param protocol: `jsonschema` or `protobuf`
:param encoding: encoding used to save data into a JSON file. By default, ``utf-8`` is used.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx as fp:
fp.write(self.to_json(protocol=protocol, **kwargs))
@classmethod
def load_json(
cls: Type['T'],
file: Union[str, TextIO],
protocol: str = 'jsonschema',
encoding: str = 'utf-8',
**kwargs
) -> 'T':
"""Load array elements from a JSON file.
        :param file: File or filename or a JSON string from which the data is loaded.
:param protocol: `jsonschema` or `protobuf`
:param encoding: encoding used to load data from a JSON file. By default, ``utf-8`` is used.
:return: a DocumentArrayLike object
"""
if hasattr(file, 'read'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'r', encoding=encoding)
with file_ctx as fp:
return cls.from_json(fp.read(), protocol=protocol, **kwargs)
@classmethod
def from_json(
cls: Type['T'],
file: Union[str, bytes, bytearray],
protocol: str = 'jsonschema',
**kwargs
) -> 'T':
from docarray import Document
json_docs = json.loads(file)
return cls(
[Document.from_dict(v, protocol=protocol) for v in json_docs], **kwargs
)
@classmethod
def from_list(
cls: Type['T'], values: List, protocol: str = 'jsonschema', **kwargs
) -> 'T':
from docarray import Document
return cls(Document.from_dict(v, protocol=protocol, **kwargs) for v in values)
def to_list(self, protocol: str = 'jsonschema', **kwargs) -> List:
"""Convert the object into a Python list.
:param protocol: `jsonschema` or `protobuf`
:return: a Python list
"""
return [d.to_dict(protocol=protocol, **kwargs) for d in self]
def to_json(self, protocol: str = 'jsonschema', **kwargs) -> str:
"""Convert the object into a JSON string. Can be loaded via :meth:`.load_json`.
:param protocol: `jsonschema` or `protobuf`
        :return: a JSON string
"""
return json.dumps(self.to_list(protocol=protocol, **kwargs))
# to comply with Document interfaces but less semantically accurate
to_dict = to_list
from_dict = from_list
|