input
stringlengths 33
5k
| output
stringlengths 32
5k
|
---|---|
"""txtai reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class TxtaiReader(BaseReader):
"""
txtai reader.
Retrieves documents through an existing in-memory txtai index.
These documents can then be used in a downstream LlamaIndex data structure.
If you wish use txtai itself as an index to to organize documents,
insert documents, and perform queries on them, please use VectorStoreIndex
with TxtaiVectorStore.
Args:
txtai_index (txtai.ann.ANN): A txtai Index object (required)
"""
def __init__(self, index: Any):
"""Initialize with parameters."""
import_err_msg = """
`txtai` package not found. For instructions on
how to install `txtai` please visit
https://neuml.github.io/txtai/install/
"""
try:
import txtai # noqa
except ImportError:
raise ImportError(import_err_msg)
self._index = index
def load_data(
self,
query: np.ndarray,
id_to_text_map: Dict[str, str],
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""
Load data from txtai index.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
id_to_text_map (Dict[str, str]): A map from ID's to text.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
search_result = self._index.search(query, k)
documents = []
for query_result in search_result:
for doc_id, _ in query_result:
doc_id = str(doc_id)
if doc_id not in id_to_text_map:
raise ValueError(
f"Document ID {doc_id} not found in id_to_text_map."
)
text = id_to_text_map[doc_id]
documents.append(Document(text=text))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
|
"""txtai reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class TxtaiReader(BaseReader):
"""txtai reader.
Retrieves documents through an existing in-memory txtai index.
These documents can then be used in a downstream LlamaIndex data structure.
If you wish use txtai itself as an index to to organize documents,
insert documents, and perform queries on them, please use VectorStoreIndex
with TxtaiVectorStore.
Args:
txtai_index (txtai.ann.ANN): A txtai Index object (required)
"""
def __init__(self, index: Any):
"""Initialize with parameters."""
import_err_msg = """
`txtai` package not found. For instructions on
how to install `txtai` please visit
https://neuml.github.io/txtai/install/
"""
try:
import txtai # noqa
except ImportError:
raise ImportError(import_err_msg)
self._index = index
def load_data(
self,
query: np.ndarray,
id_to_text_map: Dict[str, str],
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""Load data from txtai index.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
id_to_text_map (Dict[str, str]): A map from ID's to text.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
search_result = self._index.search(query, k)
documents = []
for query_result in search_result:
for doc_id, _ in query_result:
doc_id = str(doc_id)
if doc_id not in id_to_text_map:
raise ValueError(
f"Document ID {doc_id} not found in id_to_text_map."
)
text = id_to_text_map[doc_id]
documents.append(Document(text=text))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
|
from typing import TYPE_CHECKING, Type
if TYPE_CHECKING: # pragma: no cover
from pandas import DataFrame
from docarray.typing import T
class DataframeIOMixin:
"""Save/load from :class:`pandas.dataframe`
.. note::
These functions require you to install `pandas`
"""
def to_dataframe(self, **kwargs) -> 'DataFrame':
"""Export itself to a :class:`pandas.DataFrame` object.
:param kwargs: the extra kwargs will be passed to :meth:`pandas.DataFrame.from_dict`.
:return: a :class:`pandas.DataFrame` object
"""
from pandas import DataFrame
return DataFrame.from_dict(self.to_list(), **kwargs)
@classmethod
def from_dataframe(cls: Type['T'], df: 'DataFrame', *args, **kwargs) -> 'T':
"""Import a :class:`DocumentArray` from a :class:`pandas.DataFrame` object.
:param df: a :class:`pandas.DataFrame` object.
:return: a :class:`DocumentArray` object
"""
da = cls(**kwargs)
from docarray import Document
for m in df.to_dict(orient='records'):
# drop nan
da.append(
Document(
{k: v for k, v in m.items() if (not isinstance(v, float) or v == v)}
)
)
return da
|
from typing import TYPE_CHECKING, Type
if TYPE_CHECKING:
from pandas import DataFrame
from docarray.typing import T
class DataframeIOMixin:
"""Save/load from :class:`pandas.dataframe`
.. note::
These functions require you to install `pandas`
"""
def to_dataframe(self, **kwargs) -> 'DataFrame':
"""Export itself to a :class:`pandas.DataFrame` object.
:param kwargs: the extra kwargs will be passed to :meth:`pandas.DataFrame.from_dict`.
:return: a :class:`pandas.DataFrame` object
"""
from pandas import DataFrame
return DataFrame.from_dict(self.to_list(), **kwargs)
@classmethod
def from_dataframe(cls: Type['T'], df: 'DataFrame', *args, **kwargs) -> 'T':
"""Import a :class:`DocumentArray` from a :class:`pandas.DataFrame` object.
:param df: a :class:`pandas.DataFrame` object.
:return: a :class:`DocumentArray` object
"""
da = cls(**kwargs)
from docarray import Document
for m in df.to_dict(orient='records'):
# drop nan
da.append(
Document(
{k: v for k, v in m.items() if (not isinstance(v, float) or v == v)}
)
)
return da
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from torchaudio_unittest.models.conformer.conformer_test_impl import ConformerTestImpl
@skipIfNoCuda
class ConformerFloat32GPUTest(ConformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class ConformerFloat64GPUTest(ConformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from torchaudio_unittest.models.conformer.conformer_test_impl import ConformerTestImpl
@skipIfNoCuda
class ConformerFloat32GPUTest(ConformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class ConformerFloat64GPUTest(ConformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import contextlib
import json
import re
from typing import Any, List
with contextlib.suppress(ImportError):
import yaml
from llama_index.core.output_parsers.base import OutputParserException
def _marshal_llm_to_json(output: str) -> str:
"""
Extract a substring containing valid JSON or array from a string.
Args:
output: A string that may contain a valid JSON object or array surrounded by
extraneous characters or information.
Returns:
A string containing a valid JSON object or array.
"""
output = output.strip()
left_square = output.find("[")
left_brace = output.find("{")
if left_square < left_brace and left_square != -1:
left = left_square
right = output.rfind("]")
else:
left = left_brace
right = output.rfind("}")
return output[left : right + 1]
def parse_json_markdown(text: str) -> Any:
if "```json" in text:
text = text.split("```json")[1].strip().strip("```").strip()
json_string = _marshal_llm_to_json(text)
try:
json_obj = json.loads(json_string)
except json.JSONDecodeError as e_json:
try:
# NOTE: parsing again with pyyaml
# pyyaml is less strict, and allows for trailing commas
# right now we rely on this since guidance program generates
# trailing commas
json_obj = yaml.safe_load(json_string)
except yaml.YAMLError as e_yaml:
raise OutputParserException(
f"Got invalid JSON object. Error: {e_json} {e_yaml}. "
f"Got JSON string: {json_string}"
)
except NameError as exc:
raise ImportError("Please pip install PyYAML.") from exc
return json_obj
def parse_code_markdown(text: str, only_last: bool) -> List[str]:
# Regular expression pattern to match code within triple-backticks
pattern = r"```(.*?)```"
# Regular expression pattern to match code within triple backticks with
# a Python marker. Like: ```python df.columns```
python_str_pattern = re.compile(r"^```python", re.IGNORECASE)
text = python_str_pattern.sub("```", text)
# Find all matches of the pattern in the text
matches = re.findall(pattern, text, re.DOTALL)
# Return the last matched group if requested
code = matches[-1] if matches and only_last else matches
# If empty we optimistically assume the output is the code
if not code:
# we want to handle cases where the code may start or end with triple
# backticks
# we also want to handle cases where the code is surrounded by regular
# quotes
# we can't just remove all backticks due to JS template strings
candidate = text.strip()
if candidate.startswith('"') and candidate.endswith('"'):
candidate = candidate[1:-1]
if candidate.startswith("'") and candidate.endswith("'"):
candidate = candidate[1:-1]
if candidate.startswith("`") and candidate.endswith("`"):
candidate = candidate[1:-1]
# For triple backticks we split the handling of the start and end
# partly because there can be cases where only one and not the other
# is present, and partly because we don't need to be so worried
# about it being a string in a programming language
if candidate.startswith("```"):
candidate = re.sub(r"^```[a-zA-Z]*", "", candidate)
if candidate.endswith("```"):
candidate = candidate[:-3]
code = [candidate.strip()]
return code
def extract_json_str(text: str) -> str:
"""Extract JSON string from text."""
# NOTE: this regex parsing is taken from langchain.output_parsers.pydantic
match = re.search(r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL)
if not match:
raise ValueError(f"Could not extract json string from output: {text}")
return match.group()
|
import contextlib
import json
import re
from typing import Any, List
with contextlib.suppress(ImportError):
import yaml
from llama_index.core.output_parsers.base import OutputParserException
def _marshal_llm_to_json(output: str) -> str:
"""
Extract a substring containing valid JSON or array from a string.
Args:
output: A string that may contain a valid JSON object or array surrounded by
extraneous characters or information.
Returns:
A string containing a valid JSON object or array.
"""
output = output.strip()
left_square = output.find("[")
left_brace = output.find("{")
if left_square < left_brace and left_square != -1:
left = left_square
right = output.rfind("]")
else:
left = left_brace
right = output.rfind("}")
return output[left : right + 1]
def parse_json_markdown(text: str) -> Any:
if "```json" in text:
text = text.split("```json")[1].strip().strip("```").strip()
json_string = _marshal_llm_to_json(text)
try:
json_obj = json.loads(json_string)
except json.JSONDecodeError as e_json:
try:
# NOTE: parsing again with pyyaml
# pyyaml is less strict, and allows for trailing commas
# right now we rely on this since guidance program generates
# trailing commas
json_obj = yaml.safe_load(json_string)
except yaml.YAMLError as e_yaml:
raise OutputParserException(
f"Got invalid JSON object. Error: {e_json} {e_yaml}. "
f"Got JSON string: {json_string}"
)
except NameError as exc:
raise ImportError("Please pip install PyYAML.") from exc
return json_obj
def parse_code_markdown(text: str, only_last: bool) -> List[str]:
# Regular expression pattern to match code within triple-backticks
pattern = r"```(.*?)```"
# Regular expression pattern to match code within triple backticks with
# a Python marker. Like: ```python df.columns```
python_str_pattern = re.compile(r"^```python", re.IGNORECASE)
text = python_str_pattern.sub("```", text)
# Find all matches of the pattern in the text
matches = re.findall(pattern, text, re.DOTALL)
# Return the last matched group if requested
code = matches[-1] if matches and only_last else matches
# If empty we optimistically assume the output is the code
if not code:
# we want to handle cases where the code may start or end with triple
# backticks
# we also want to handle cases where the code is surrounded by regular
# quotes
# we can't just remove all backticks due to JS template strings
candidate = text.strip()
if candidate.startswith('"') and candidate.endswith('"'):
candidate = candidate[1:-1]
if candidate.startswith("'") and candidate.endswith("'"):
candidate = candidate[1:-1]
if candidate.startswith("`") and candidate.endswith("`"):
candidate = candidate[1:-1]
# For triple backticks we split the handling of the start and end
# partly because there can be cases where only one and not the other
# is present, and partly because we don't need to be so worried
# about it being a string in a programming language
if candidate.startswith("```"):
candidate = re.sub(r"^```[a-zA-Z]*", "", candidate)
if candidate.endswith("```"):
candidate = candidate[:-3]
code = [candidate.strip()]
return code
def extract_json_str(text: str) -> str:
"""Extract JSON string from text."""
# NOTE: this regex parsing is taken from langchain.output_parsers.pydantic
match = re.search(r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL)
if not match:
raise ValueError(f"Could not extract json string from output: {text}")
return match.group()
|
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
from jina.excepts import BadDocType
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert type(segmenter).__name__ == 'VADSpeechSegmenter'
@pytest.mark.parametrize('_type', ['wav', 'mp3', 'blob', '', None])
def test_segment(build_da, segmenter, _type):
if _type == '':
with pytest.raises(
BadDocType, match='doc needs to have either a blob or a wav/mp3 uri'
):
segmenter.segment(DocumentArray(Document()))
return
elif _type is None:
segmenter.segment(DocumentArray())
return
docs = build_da(_type)
segmenter.segment(docs)
# assert doc has 4 chunks
for doc in docs:
assert len(doc.chunks) == 4
file_paths = [
f'doc_{_type}_original.wav',
f'chunk_{_type}_0_56500.wav',
f'chunk_{_type}_69500_92000.wav',
f'chunk_{_type}_94500_213000.wav',
f'chunk_{_type}_223500_270500.wav',
]
# assert dumped files exist
for file_path in file_paths:
assert (Path(segmenter.workspace) / f'audio/{file_path}').is_file()
# assert exception is raised when doc blob is provided by sample rate is not
if _type == 'blob':
docs[0].tags.pop('sample_rate')
with pytest.raises(
BadDocType, match='data is blob but sample rate is not provided'
):
segmenter.segment(docs)
docs[0].tags['sample_rate'] = 0
with pytest.raises(BadDocType, match='sample rate cannot be 0'):
segmenter.segment(docs)
|
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
from jina.excepts import BadDocType
from ...vad_speech_segmenter import VADSpeechSegmenter
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert type(segmenter).__name__ == 'VADSpeechSegmenter'
@pytest.mark.parametrize('_type', ['wav', 'mp3', 'blob', '', None])
def test_segment(build_da, segmenter, _type):
if _type == '':
with pytest.raises(
BadDocType, match='doc needs to have either a blob or a wav/mp3 uri'
):
segmenter.segment(DocumentArray(Document()))
return
elif _type is None:
segmenter.segment(DocumentArray())
return
docs = build_da(_type)
segmenter.segment(docs)
# assert doc has 4 chunks
for doc in docs:
assert len(doc.chunks) == 4
file_paths = [
f'doc_{_type}_original.wav',
f'chunk_{_type}_0_56500.wav',
f'chunk_{_type}_69500_92000.wav',
f'chunk_{_type}_94500_213000.wav',
f'chunk_{_type}_223500_270500.wav',
]
# assert dumped files exist
for file_path in file_paths:
assert (Path(segmenter.workspace) / f'audio/{file_path}').is_file()
# assert exception is raised when doc blob is provided by sample rate is not
if _type == 'blob':
docs[0].tags.pop('sample_rate')
with pytest.raises(
BadDocType, match='data is blob but sample rate is not provided'
):
segmenter.segment(docs)
docs[0].tags['sample_rate'] = 0
with pytest.raises(BadDocType, match='sample rate cannot be 0'):
segmenter.segment(docs)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import dump, get
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
default='val',
choices=['train', 'val', 'test'],
help='Collect image metas from which dataset')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
help='The output image metas file name. The save dir is in the '
'same directory as `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
help='Processes used for get image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
shape = img.shape
meta = dict(filename=filename, ori_shape=shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
assert args.out.endswith('pkl'), 'The output file name must be pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
shuffix = ann_file.split('.')[-1]
raise NotImplementedError('File name must be csv or txt suffix but '
f'get {shuffix}')
print(f'Successfully load annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
dump(image_metas, save_path, protocol=4)
print(f'Image meta file save to: {save_path}')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import FileClient, dump
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
default='val',
choices=['train', 'val', 'test'],
help='Collect image metas from which dataset')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
help='The output image metas file name. The save dir is in the '
'same directory as `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
help='Processes used for get image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
file_client = FileClient(backend='disk')
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
shape = img.shape
meta = dict(filename=filename, ori_shape=shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
assert args.out.endswith('pkl'), 'The output file name must be pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
shuffix = ann_file.split('.')[-1]
raise NotImplementedError('File name must be csv or txt suffix but '
f'get {shuffix}')
print(f'Successfully load annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
dump(image_metas, save_path, protocol=4)
print(f'Image meta file save to: {save_path}')
if __name__ == '__main__':
main()
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import cv2
import pytest
from jina import Executor, Document, DocumentArray
from ...yolov5_segmenter import YoloV5Segmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert segmenter.model_name_or_path == 'yolov5s'
@pytest.mark.parametrize(
'model_path', [
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
'yolov5s',
'yolov5m'
]
)
def test_model_name_or_path(build_da, model_path):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) > 0
for chunk in doc.chunks:
assert chunk.blob.ndim == 3
assert chunk.tags.get('label')
assert chunk.tags.get('conf')
@pytest.mark.parametrize(
'model_path, expected_detections', [
(os.path.join(cur_dir, '../data/models/yolov5s.pt'), {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(os.path.join(cur_dir, '../data/models/yolov5m.pt'), {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
]
)
def test_n_detections(build_da, model_path, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
@pytest.mark.parametrize(
'confidence_threshold, expected_detections', [
(0.3, {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.5, {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.8, {'bus.jpg': 3, 'zidane.jpg': 2, 'man.jpg': 0}),
]
)
def test_confidence_threshold(build_da, confidence_threshold, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'),
default_confidence_threshold=confidence_threshold)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
assert all(chunk.tags['conf'] >= confidence_threshold for chunk in doc.chunks)
def test_traversal_paths():
da = DocumentArray([
Document(
id='root',
blob=cv2.imread(os.path.join(cur_dir, '../data/img/man.jpg')),
),
])
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'))
segmenter.segment(da, parameters={})
# detects 2 persons and 1 cell phone
assert len(da[0].chunks) == 3
assert da[0].chunks[0].tags['label'] == 'person'
assert da[0].chunks[1].tags['label'] == 'person'
assert da[0].chunks[2].tags['label'] == 'cell phone'
segmenter.segment(da, parameters={'traversal_paths': ['c']})
# the first detected person spans the whole image, so segmenting the chunk produces 3 detections
person_chunk = da[0].chunks[0]
assert len(person_chunk.chunks) == 3
assert person_chunk.chunks[0].tags['label'] == 'person'
assert person_chunk.chunks[1].tags['label'] == 'person'
assert person_chunk.chunks[2].tags['label'] == 'cell phone'
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from operator import itemgetter
import pytest
from jina import Executor, Document, DocumentArray
import cv2
from ...yolov5_segmenter import YoloV5Segmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
segmenter = Executor.load_config('config.yml')
assert segmenter.model_name_or_path == 'yolov5s'
@pytest.mark.parametrize(
'model_path', [
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
'yolov5s',
'yolov5m'
]
)
def test_model_name_or_path(build_da, model_path):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) > 0
for chunk in doc.chunks:
assert chunk.blob.ndim == 3
assert chunk.tags.get('label')
assert chunk.tags.get('conf')
@pytest.mark.parametrize(
'model_path, expected_detections', [
(os.path.join(cur_dir, '../data/models/yolov5s.pt'), {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(os.path.join(cur_dir, '../data/models/yolov5m.pt'), {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
]
)
def test_n_detections(build_da, model_path, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
@pytest.mark.parametrize(
'confidence_threshold, expected_detections', [
(0.3, {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.5, {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.8, {'bus.jpg': 3, 'zidane.jpg': 2, 'man.jpg': 0}),
]
)
def test_confidence_threshold(build_da, confidence_threshold, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'),
default_confidence_threshold=confidence_threshold)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
assert all(chunk.tags['conf'] >= confidence_threshold for chunk in doc.chunks)
def test_traversal_paths():
da = DocumentArray([
Document(
id='root',
blob=cv2.imread(os.path.join(cur_dir, '../data/img/man.jpg')),
),
])
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'))
segmenter.segment(da, parameters={})
# detects 2 persons and 1 cell phone
assert len(da[0].chunks) == 3
assert da[0].chunks[0].tags['label'] == 'person'
assert da[0].chunks[1].tags['label'] == 'person'
assert da[0].chunks[2].tags['label'] == 'cell phone'
segmenter.segment(da, parameters={'traversal_paths': ['c']})
# the first detected person spans the whole image, so segmenting the chunk produces 3 detections
person_chunk = da[0].chunks[0]
assert len(person_chunk.chunks) == 3
assert person_chunk.chunks[0].tags['label'] == 'person'
assert person_chunk.chunks[1].tags['label'] == 'person'
assert person_chunk.chunks[2].tags['label'] == 'cell phone'
|
from typing import Any, Dict, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K
from llama_index.core.schema import NodeWithScore, QueryBundle
from llama_index.core.settings import Settings
from llama_index.core.vector_stores.types import MetadataFilters
from llama_index.indices.managed.bge_m3.base import BGEM3Index
class BGEM3Retriever(BaseRetriever):
"""
Vector index retriever.
Args:
index (BGEM3Index): BGEM3 index.
similarity_top_k (int): number of top k results to return.
filters (Optional[MetadataFilters]): metadata filters, defaults to None
doc_ids (Optional[List[str]]): list of documents to constrain search.
bge_m3_kwargs (dict): Additional bge_m3 specific kwargs to pass
through to the bge_m3 index at query time.
"""
def __init__(
self,
index: BGEM3Index,
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
filters: Optional[MetadataFilters] = None,
node_ids: Optional[List[str]] = None,
doc_ids: Optional[List[str]] = None,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._index = index
self._docstore = self._index.docstore
self._similarity_top_k = similarity_top_k
self._node_ids = node_ids
self._doc_ids = doc_ids
self._filters = filters
self._kwargs: Dict[str, Any] = kwargs.get("bge_m3_kwargs", {})
self._model = self._index.model
self._batch_size = self._index.batch_size
self._query_maxlen = self._index.query_maxlen
self._weights_for_different_modes = self._index.weights_for_different_modes
super().__init__(
callback_manager=callback_manager or Settings.callback_manager,
object_map=object_map,
verbose=verbose,
)
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
return self._index.query(
query_str=query_bundle.query_str,
top_k=self._similarity_top_k,
**self._kwargs,
)
|
from typing import Any, Dict, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K
from llama_index.core.schema import NodeWithScore, QueryBundle
from llama_index.core.settings import Settings
from llama_index.core.vector_stores.types import MetadataFilters
from llama_index.indices.managed.bge_m3.base import BGEM3Index
class BGEM3Retriever(BaseRetriever):
"""Vector index retriever.
Args:
index (BGEM3Index): BGEM3 index.
similarity_top_k (int): number of top k results to return.
filters (Optional[MetadataFilters]): metadata filters, defaults to None
doc_ids (Optional[List[str]]): list of documents to constrain search.
bge_m3_kwargs (dict): Additional bge_m3 specific kwargs to pass
through to the bge_m3 index at query time.
"""
def __init__(
self,
index: BGEM3Index,
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
filters: Optional[MetadataFilters] = None,
node_ids: Optional[List[str]] = None,
doc_ids: Optional[List[str]] = None,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._index = index
self._docstore = self._index.docstore
self._similarity_top_k = similarity_top_k
self._node_ids = node_ids
self._doc_ids = doc_ids
self._filters = filters
self._kwargs: Dict[str, Any] = kwargs.get("bge_m3_kwargs", {})
self._model = self._index.model
self._batch_size = self._index.batch_size
self._query_maxlen = self._index.query_maxlen
self._weights_for_different_modes = self._index.weights_for_different_modes
super().__init__(
callback_manager=callback_manager or Settings.callback_manager,
object_map=object_map,
verbose=verbose,
)
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
return self._index.query(
query_str=query_bundle.query_str,
top_k=self._similarity_top_k,
**self._kwargs,
)
|
_base_ = '../ssd/ssd300_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
_base_ = '../ssd/ssd300_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
import io
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.io import decode_jpeg, encode_jpeg
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomErase` for details."""
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, tv_tensors.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, tv_tensors.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def jpeg(image: torch.Tensor, quality: int) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.JPEG` for details."""
if torch.jit.is_scripting():
return jpeg_image(image, quality=quality)
_log_api_usage_once(jpeg)
kernel = _get_kernel(jpeg, type(image))
return kernel(image, quality=quality)
@_register_kernel_internal(jpeg, torch.Tensor)
@_register_kernel_internal(jpeg, tv_tensors.Image)
def jpeg_image(image: torch.Tensor, quality: int) -> torch.Tensor:
original_shape = image.shape
image = image.view((-1,) + image.shape[-3:])
if image.shape[0] == 0: # degenerate
return image.reshape(original_shape).clone()
image = [decode_jpeg(encode_jpeg(image[i], quality=quality)) for i in range(image.shape[0])]
image = torch.stack(image, dim=0).view(original_shape)
return image
@_register_kernel_internal(jpeg, tv_tensors.Video)
def jpeg_video(video: torch.Tensor, quality: int) -> torch.Tensor:
return jpeg_image(video, quality=quality)
@_register_kernel_internal(jpeg, PIL.Image.Image)
def _jpeg_image_pil(image: PIL.Image.Image, quality: int) -> PIL.Image.Image:
raw_jpeg = io.BytesIO()
image.save(raw_jpeg, format="JPEG", quality=quality)
# we need to copy since PIL.Image.open() will return PIL.JpegImagePlugin.JpegImageFile
# which is a sub-class of PIL.Image.Image. this will fail check_transform() test.
return PIL.Image.open(raw_jpeg).copy()
|
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomErase` for details."""
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, tv_tensors.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, tv_tensors.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
import aiohttp
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.types.request.data import DataRequest
logger = JinaLogger('clientlet')
class ClientTestExecutor(Executor):
@requests
def error(self, **kwargs):
raise NotImplementedError
@pytest.fixture
def flow_with_exception_request():
return Flow().add(uses=ClientTestExecutor).add()
@pytest.mark.asyncio
async def test_http_clientlet():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', None, {'a': 'b'})
r = await iolet.send_message(request)
response = DataRequest(await r.json())
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_websocket_clientlet():
with pytest.raises(aiohttp.ClientError):
async with WebsocketClientlet(url='ws://localhost:12345', logger=logger):
pass
def test_client_behaviour(flow_with_exception_request, mocker):
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
on_error_mock = None
with pytest.raises(BadServer):
with flow_with_exception_request as f:
f.post(
'',
on_done=on_done_mock,
on_error=on_error_mock,
on_always=on_always_mock,
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
|
import aiohttp
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.types.request.data import DataRequest
logger = JinaLogger('clientlet')
class ClientTestExecutor(Executor):
@requests
def error(self, **kwargs):
raise NotImplementedError
@pytest.fixture
def flow_with_exception_request():
return Flow().add(uses=ClientTestExecutor).add()
@pytest.mark.asyncio
async def test_http_clientlet():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', None, {'a': 'b'})
r = await iolet.send_message(request)
response = DataRequest(await r.json())
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_websocket_clientlet():
with pytest.raises(aiohttp.ClientError):
async with WebsocketClientlet(
url='ws://localhost:12345', logger=logger
) as iolet:
pass
def test_client_behaviour(flow_with_exception_request, mocker):
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
on_error_mock = None
with pytest.raises(BadServer):
with flow_with_exception_request as f:
f.post(
'',
on_done=on_done_mock,
on_error=on_error_mock,
on_always=on_always_mock,
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
on_error_mock = mocker.Mock()
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
with flow_with_exception_request as f:
f.post(
'', on_done=on_done_mock, on_error=on_error_mock, on_always=on_always_mock
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
on_error_mock.assert_called_once()
|
from docarray import Document, DocumentArray
import numpy as np
def test_success_find_with_added_kwargs(start_storage, monkeypatch):
nrof_docs = 1000
num_candidates = 100
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'distance': 'l2_norm',
'index_name': 'test_success_find_with_added_kwargs',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
],
)
def _mock_knn_search(**kwargs):
assert kwargs['knn']['num_candidates'] == num_candidates
return {'hits': {'hits': []}}
monkeypatch.setattr(elastic_doc._client, 'knn_search', _mock_knn_search)
np_query = np.array([2, 1, 3])
elastic_doc.find(np_query, limit=10, num_candidates=num_candidates)
def test_filter(start_storage):
import random
import string
elastic_da = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 2,
'columns': {
'A': 'str',
'B': 'str',
'V': 'str',
'D': 'str',
'E': 'str',
'F': 'str',
'G': 'str',
},
},
)
def ran():
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
def ran_size():
sizes = ['S', 'M', 'L', 'XL']
return sizes[random.randint(0, len(sizes) - 1)]
def ran_type():
types = ['A', 'B', 'C', 'D']
return types[random.randint(0, len(types) - 1)]
def ran_stype():
stypes = ['SA', 'SB', 'SC', 'SD']
return stypes[random.randint(0, len(stypes) - 1)]
docs = DocumentArray(
[
Document(
id=f'r{i}',
embedding=np.random.rand(2),
tags={
'A': ran(),
'B': ran_stype(),
'C': ran_size(),
'D': ran_type(),
'E': ran(),
'F': ran_type(),
'G': f'G{i}',
},
)
for i in range(50)
]
)
with elastic_da:
elastic_da.extend(docs)
res = elastic_da.find(query=Document(embedding=docs[0].embedding))
assert len(res) > 0
assert res[0][0].tags['G'] == 'G0'
filter_ = {'match': {'G': 'G3'}}
res = elastic_da.find(filter=filter_)
assert len(res) > 0
assert res[0].tags['G'] == 'G3'
res = elastic_da.find(query=Document(embedding=docs[0].embedding), filter=filter_)
assert len(res) > 0
assert res[0][0].tags['G'] == 'G3'
|
from docarray import Document, DocumentArray
import numpy as np
def test_success_find_with_added_kwargs(start_storage, monkeypatch):
nrof_docs = 1000
num_candidates = 100
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'distance': 'l2_norm',
'index_name': 'test_success_find_with_added_kwargs',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
],
)
def _mock_knn_search(**kwargs):
assert kwargs['knn']['num_candidates'] == num_candidates
return {'hits': {'hits': []}}
monkeypatch.setattr(elastic_doc._client, 'knn_search', _mock_knn_search)
np_query = np.array([2, 1, 3])
elastic_doc.find(np_query, limit=10, num_candidates=num_candidates)
|
from unittest.mock import patch, MagicMock
import pytest
from llama_index.utils.workflow import (
draw_all_possible_flows,
draw_most_recent_execution,
)
@pytest.mark.asyncio
async def test_workflow_draw_methods(workflow):
with patch("pyvis.network.Network") as mock_network:
draw_all_possible_flows(workflow, filename="test_all_flows.html")
mock_network.assert_called_once()
mock_network.return_value.show.assert_called_once_with(
"test_all_flows.html", notebook=False
)
await workflow.run()
with patch("pyvis.network.Network") as mock_network:
draw_most_recent_execution(workflow, filename="test_recent_execution.html")
mock_network.assert_called_once()
mock_network.return_value.show.assert_called_once_with(
"test_recent_execution.html", notebook=False
)
@pytest.mark.asyncio
async def test_draw_all_possible_flows_with_max_label_length(workflow):
"""Test the max_label_length parameter."""
with patch("pyvis.network.Network") as mock_network:
mock_net_instance = MagicMock()
mock_network.return_value = mock_net_instance
# Test with max_label_length=10
draw_all_possible_flows(
workflow, filename="test_truncated.html", max_label_length=10
)
# Extract actual label mappings from add_node calls
label_mappings = {}
for call in mock_net_instance.add_node.call_args_list:
_, kwargs = call
label = kwargs.get("label")
title = kwargs.get("title")
# For items with titles (truncated), map title->label
if title:
label_mappings[title] = label
# For items without titles (not truncated), map label->label
elif label:
label_mappings[label] = label
# Test cases using actual events from DummyWorkflow fixture
test_cases = [
("OneTestEvent", "OneTestEv*"), # 12 chars -> truncated to 10
("LastEvent", "LastEvent"), # 9 chars -> no truncation
(
"StartEvent",
"StartEvent",
), # 10 chars -> no truncation (exactly at limit)
("StopEvent", "StopEvent"), # 9 chars -> no truncation
]
# Verify actual results match expected for available test cases
for original, expected_label in test_cases:
if original in label_mappings:
actual_label = label_mappings[original]
assert actual_label == expected_label, (
f"Expected '{original}' to become '{expected_label}', but got '{actual_label}'"
)
assert len(actual_label) <= 10, (
f"Label '{actual_label}' exceeds max_label_length=10"
)
|
from unittest.mock import patch
import pytest
from llama_index.utils.workflow import (
draw_all_possible_flows,
draw_most_recent_execution,
)
@pytest.mark.asyncio
async def test_workflow_draw_methods(workflow):
with patch("pyvis.network.Network") as mock_network:
draw_all_possible_flows(workflow, filename="test_all_flows.html")
mock_network.assert_called_once()
mock_network.return_value.show.assert_called_once_with(
"test_all_flows.html", notebook=False
)
await workflow.run()
with patch("pyvis.network.Network") as mock_network:
draw_most_recent_execution(workflow, filename="test_recent_execution.html")
mock_network.assert_called_once()
mock_network.return_value.show.assert_called_once_with(
"test_recent_execution.html", notebook=False
)
|
from torchaudio import ( # noqa: F401
_extension,
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
from torchaudio.backend import get_audio_backend, list_audio_backends, set_audio_backend
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
from torchaudio import ( # noqa: F401
_extension,
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
from torchaudio.backend import get_audio_backend, list_audio_backends, set_audio_backend
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='SimOTAAssigner',
center_radius=2.5,
candidate_topk=10,
iou_weight=3.0,
cls_weight=1.0)))
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=False, train_cfg=train_cfg)
assert not self.use_l1
assert isinstance(self.multi_level_cls_convs[0][0], ConvModule)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, objectnesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when there are no true boxes')
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=True, train_cfg=train_cfg)
assert isinstance(self.multi_level_cls_convs[0][0],
DepthwiseSeparableConvModule)
self.use_l1 = True
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
onegt_l1_loss = one_gt_losses['loss_l1'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_obj_loss.item() > 0, 'obj loss should be non-zero'
assert onegt_l1_loss.item() > 0, 'l1 loss should be non-zero'
|
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='SimOTAAssigner',
center_radius=2.5,
candidate_topk=10,
iou_weight=3.0,
cls_weight=1.0)))
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=False, train_cfg=train_cfg)
assert not self.use_l1
assert isinstance(self.multi_level_cls_convs[0][0], ConvModule)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, objectnesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when there are no true boxes')
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=True, train_cfg=train_cfg)
assert isinstance(self.multi_level_cls_convs[0][0],
DepthwiseSeparableConvModule)
self.use_l1 = True
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
onegt_l1_loss = one_gt_losses['loss_l1'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_obj_loss.item() > 0, 'obj loss should be non-zero'
assert onegt_l1_loss.item() > 0, 'l1 loss should be non-zero'
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import get_user_metadata, update_user_metadata
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return Config().database_api_port
@expose
def send_execution_update(self, execution_result_dict: dict[Any, Any]):
self.event_queue.publish(ExecutionResult(**execution_result_dict))
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisEventQueue
from backend.data.user import get_user_metadata, update_user_metadata
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisEventQueue()
@classmethod
def get_port(cls) -> int:
return Config().database_api_port
@expose
def send_execution_update(self, execution_result_dict: dict[Any, Any]):
self.event_queue.put(ExecutionResult(**execution_result_dict))
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.datasets.builder import build_dataset
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = build_dataset(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
gt_instances.masks = masks.astype(np.bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.datasets.builder import build_dataset
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = build_dataset(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.METAINFO
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
gt_instances.masks = masks.astype(np.bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
forced_align,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
merge_tokens,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
TokenSpan,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
forced_align,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
"""NAS-FCOS: Fast Neural Architecture Search for Object Detection.
https://arxiv.org/abs/1906.0442
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
"""NAS-FCOS: Fast Neural Architecture Search for Object Detection.
https://arxiv.org/abs/1906.0442
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D
from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
DiceCost, FocalLossCost, IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .multi_instance_assigner import MultiInstanceAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .topk_hungarian_assigner import TopkHungarianAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'TopkHungarianAssigner', 'BBoxL1Cost',
'ClassificationCost', 'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost',
'IoUCost', 'BboxOverlaps2D', 'DynamicSoftLabelAssigner',
'MultiInstanceAssigner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D
from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
DiceCost, FocalLossCost, IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .multi_instance_assigner import MultiInstanceAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'BBoxL1Cost', 'ClassificationCost',
'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost', 'IoUCost',
'BboxOverlaps2D', 'DynamicSoftLabelAssigner', 'MultiInstanceAssigner'
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=128),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=128),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist, track_parallel_progress
prog_description = '''K-Fold coco split.
To split coco data for semi-supervised object detection:
python tools/misc/split_coco.py
'''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-root',
type=str,
help='The data root of coco dataset.',
default='./data/coco/')
parser.add_argument(
'--out-dir',
type=str,
help='The output directory of coco semi-supervised annotations.',
default='./data/coco_semi_annos/')
parser.add_argument(
'--labeled-percent',
type=float,
nargs='+',
help='The percentage of labeled data in the training set.',
default=[1, 2, 5, 10])
parser.add_argument(
'--fold',
type=int,
help='K-fold cross validation for semi-supervised object detection.',
default=5)
args = parser.parse_args()
return args
def split_coco(data_root, out_dir, percent, fold):
"""Split COCO data for Semi-supervised object detection.
Args:
data_root (str): The data root of coco dataset.
out_dir (str): The output directory of coco semi-supervised
annotations.
percent (float): The percentage of labeled data in the training set.
fold (int): The fold of dataset and set as random seed for data split.
"""
def save_anns(name, images, annotations):
sub_anns = dict()
sub_anns['images'] = images
sub_anns['annotations'] = annotations
sub_anns['licenses'] = anns['licenses']
sub_anns['categories'] = anns['categories']
sub_anns['info'] = anns['info']
mkdir_or_exist(out_dir)
dump(sub_anns, f'{out_dir}/{name}.json')
# set random seed with the fold
np.random.seed(fold)
ann_file = osp.join(data_root, 'annotations/instances_train2017.json')
anns = load(ann_file)
image_list = anns['images']
labeled_total = int(percent / 100. * len(image_list))
labeled_inds = set(
np.random.choice(range(len(image_list)), size=labeled_total))
labeled_ids, labeled_images, unlabeled_images = [], [], []
for i in range(len(image_list)):
if i in labeled_inds:
labeled_images.append(image_list[i])
labeled_ids.append(image_list[i]['id'])
else:
unlabeled_images.append(image_list[i])
# get all annotations of labeled images
labeled_ids = set(labeled_ids)
labeled_annotations, unlabeled_annotations = [], []
for ann in anns['annotations']:
if ann['image_id'] in labeled_ids:
labeled_annotations.append(ann)
else:
unlabeled_annotations.append(ann)
# save labeled and unlabeled
labeled_name = f'instances_train2017.{fold}@{percent}'
unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled'
save_anns(labeled_name, labeled_images, labeled_annotations)
save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations)
def multi_wrapper(args):
return split_coco(*args)
if __name__ == '__main__':
args = parse_args()
arguments_list = [(args.data_root, args.out_dir, p, f)
for f in range(1, args.fold + 1)
for p in args.labeled_percent]
track_parallel_progress(multi_wrapper, arguments_list, args.fold)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
prog_description = '''K-Fold coco split.
To split coco data for semi-supervised object detection:
python tools/misc/split_coco.py
'''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-root',
type=str,
help='The data root of coco dataset.',
default='./data/coco/')
parser.add_argument(
'--out-dir',
type=str,
help='The output directory of coco semi-supervised annotations.',
default='./data/coco_semi_annos/')
parser.add_argument(
'--labeled-percent',
type=float,
nargs='+',
help='The percentage of labeled data in the training set.',
default=[1, 2, 5, 10])
parser.add_argument(
'--fold',
type=int,
help='K-fold cross validation for semi-supervised object detection.',
default=5)
args = parser.parse_args()
return args
def split_coco(data_root, out_dir, percent, fold):
"""Split COCO data for Semi-supervised object detection.
Args:
data_root (str): The data root of coco dataset.
out_dir (str): The output directory of coco semi-supervised
annotations.
percent (float): The percentage of labeled data in the training set.
fold (int): The fold of dataset and set as random seed for data split.
"""
def save_anns(name, images, annotations):
sub_anns = dict()
sub_anns['images'] = images
sub_anns['annotations'] = annotations
sub_anns['licenses'] = anns['licenses']
sub_anns['categories'] = anns['categories']
sub_anns['info'] = anns['info']
mmcv.mkdir_or_exist(out_dir)
mmcv.dump(sub_anns, f'{out_dir}/{name}.json')
# set random seed with the fold
np.random.seed(fold)
ann_file = osp.join(data_root, 'annotations/instances_train2017.json')
anns = mmcv.load(ann_file)
image_list = anns['images']
labeled_total = int(percent / 100. * len(image_list))
labeled_inds = set(
np.random.choice(range(len(image_list)), size=labeled_total))
labeled_ids, labeled_images, unlabeled_images = [], [], []
for i in range(len(image_list)):
if i in labeled_inds:
labeled_images.append(image_list[i])
labeled_ids.append(image_list[i]['id'])
else:
unlabeled_images.append(image_list[i])
# get all annotations of labeled images
labeled_ids = set(labeled_ids)
labeled_annotations, unlabeled_annotations = [], []
for ann in anns['annotations']:
if ann['image_id'] in labeled_ids:
labeled_annotations.append(ann)
else:
unlabeled_annotations.append(ann)
# save labeled and unlabeled
labeled_name = f'instances_train2017.{fold}@{percent}'
unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled'
save_anns(labeled_name, labeled_images, labeled_annotations)
save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations)
def multi_wrapper(args):
return split_coco(*args)
if __name__ == '__main__':
args = parse_args()
arguments_list = [(args.data_root, args.out_dir, p, f)
for f in range(1, args.fold + 1)
for p in args.labeled_percent]
mmcv.track_parallel_progress(multi_wrapper, arguments_list, args.fold)
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor
def get_bounding_boxes(flat_inputs: List[Any]) -> datapoints.BoundingBoxes:
# This assumes there is only one bbox per sample as per the general convention
try:
return next(inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBoxes))
except StopIteration:
raise ValueError("No bounding boxes were found in the sample")
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if check_type(inpt, (is_pure_tensor, datapoints.Image, PIL.Image.Image, datapoints.Video))
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_size(inpt))
for inpt in flat_inputs
if check_type(
inpt,
(
is_pure_tensor,
datapoints.Image,
PIL.Image.Image,
datapoints.Video,
datapoints.Mask,
datapoints.BoundingBoxes,
),
)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_size, is_simple_tensor
def get_bounding_boxes(flat_inputs: List[Any]) -> datapoints.BoundingBoxes:
# This assumes there is only one bbox per sample as per the general convention
try:
return next(inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBoxes))
except StopIteration:
raise ValueError("No bounding boxes were found in the sample")
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if check_type(inpt, (is_simple_tensor, datapoints.Image, PIL.Image.Image, datapoints.Video))
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_size(inpt))
for inpt in flat_inputs
if check_type(
inpt,
(
is_simple_tensor,
datapoints.Image,
PIL.Image.Image,
datapoints.Video,
datapoints.Mask,
datapoints.BoundingBoxes,
),
)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import ops
from keras.src import random
from keras.src import testing
class SolarizationTest(testing.TestCase):
def _test_input_output(self, layer, input_value, expected_value, dtype):
input = np.ones(shape=(2, 224, 224, 3), dtype=dtype) * input_value
expected_output = ops.clip(
(
np.ones(shape=(2, 224, 224, 3), dtype=layer.compute_dtype)
* expected_value
),
0,
255,
)
output = layer(input)
self.assertAllClose(output, expected_output)
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.Solarization,
init_kwargs={
"addition_factor": 0.75,
"value_range": (20, 200),
"threshold_factor": (0, 1),
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
@parameterized.named_parameters(
("0_255", 0, 255),
("64_191", 64, 191),
("127_128", 127, 128),
("191_64", 191, 64),
("255_0", 255, 0),
)
def test_output_values(self, input_value, expected_value):
solarization = layers.Solarization(value_range=(0, 255))
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=expected_value,
dtype="uint8",
)
@parameterized.named_parameters(
("0_0", 0, 0),
("191_64", 191, 64),
("255_0", 255, 0),
)
def test_only_values_above_threshold_are_solarized(
self, input_value, output_value
):
solarization = layers.Solarization(
threshold_factor=(128.0 / 255.0, 128.0 / 255.0),
value_range=(0, 255),
)
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=output_value,
dtype="uint8",
)
def test_random_augmentation_applied_per_sample(self):
image = random.uniform((16, 16, 3), minval=0, maxval=255)
images = ops.stack([image, image])
layer = layers.Solarization(
value_range=(0, 255), threshold_factor=0.5, addition_factor=0.5
)
outputs = layer(images)
self.assertNotAllClose(outputs[0], outputs[1])
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import ops
from keras.src import random
from keras.src import testing
class SolarizationTest(testing.TestCase, parameterized.TestCase):
def _test_input_output(self, layer, input_value, expected_value, dtype):
input = np.ones(shape=(2, 224, 224, 3), dtype=dtype) * input_value
expected_output = ops.clip(
(
np.ones(shape=(2, 224, 224, 3), dtype=layer.compute_dtype)
* expected_value
),
0,
255,
)
output = layer(input)
self.assertAllClose(output, expected_output)
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.Solarization,
init_kwargs={
"addition_factor": 0.75,
"value_range": (20, 200),
"threshold_factor": (0, 1),
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
@parameterized.named_parameters(
("0_255", 0, 255),
("64_191", 64, 191),
("127_128", 127, 128),
("191_64", 191, 64),
("255_0", 255, 0),
)
def test_output_values(self, input_value, expected_value):
solarization = layers.Solarization(value_range=(0, 255))
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=expected_value,
dtype="uint8",
)
@parameterized.named_parameters(
("0_0", 0, 0),
("191_64", 191, 64),
("255_0", 255, 0),
)
def test_only_values_above_threshold_are_solarized(
self, input_value, output_value
):
solarization = layers.Solarization(
threshold_factor=(128.0 / 255.0, 128.0 / 255.0),
value_range=(0, 255),
)
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=output_value,
dtype="uint8",
)
def test_random_augmentation_applied_per_sample(self):
image = random.uniform((16, 16, 3), minval=0, maxval=255)
images = ops.stack([image, image])
layer = layers.Solarization(
value_range=(0, 255), threshold_factor=0.5, addition_factor=0.5
)
outputs = layer(images)
self.assertNotAllClose(outputs[0], outputs[1])
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = os.path.join(
HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=False), config.DATASET_INFO_FILENAME
).replace(os.sep, "/")
datset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
self.assertTrue(os.path.exists(datset_info_path))
@pytest.mark.integration
def test_wikipedia_frr(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name="20220301.frr",
hash=dataset_module.hash,
)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds is not None
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = os.path.join(
HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=False), config.DATASET_INFO_FILENAME
).replace(os.sep, "/")
datset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
self.assertTrue(os.path.exists(datset_info_path))
|
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.CohereRerank"
)
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Any = None
"""Cohere client to use for compressing documents."""
top_n: Optional[int] = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
if not values.get("client"):
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def rerank(
self,
documents: Sequence[Union[str, Document, dict]],
query: str,
*,
model: Optional[str] = None,
top_n: Optional[int] = -1,
max_chunks_per_doc: Optional[int] = None,
) -> list[dict[str, Any]]:
"""Returns an ordered list of documents ordered by their relevance to the provided query.
Args:
query: The query to use for reranking.
documents: A sequence of documents to rerank.
model: The model to use for re-ranking. Default to self.model.
top_n : The number of results to return. If None returns all results.
Defaults to self.top_n.
max_chunks_per_doc : The maximum number of chunks derived from a document.
""" # noqa: E501
if len(documents) == 0: # to avoid empty api call
return []
docs = [
doc.page_content if isinstance(doc, Document) else doc for doc in documents
]
model = model or self.model
top_n = top_n if (top_n is None or top_n > 0) else self.top_n
results = self.client.rerank(
query=query,
documents=docs,
model=model,
top_n=top_n,
max_chunks_per_doc=max_chunks_per_doc,
)
if hasattr(results, "results"):
results = getattr(results, "results")
result_dicts = []
for res in results:
result_dicts.append(
{
"index": res.index,
"relevance_score": res.relevance_score,
}
)
return result_dicts
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
compressed = []
for res in self.rerank(documents, query):
doc = documents[res["index"]]
doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
doc_copy.metadata["relevance_score"] = res["relevance_score"]
compressed.append(doc_copy)
return compressed
|
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.CohereRerank"
)
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Any = None
"""Cohere client to use for compressing documents."""
top_n: Optional[int] = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
if not values.get("client"):
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def rerank(
self,
documents: Sequence[Union[str, Document, dict]],
query: str,
*,
model: Optional[str] = None,
top_n: Optional[int] = -1,
max_chunks_per_doc: Optional[int] = None,
) -> list[dict[str, Any]]:
"""Returns an ordered list of documents ordered by their relevance to the provided query.
Args:
query: The query to use for reranking.
documents: A sequence of documents to rerank.
model: The model to use for re-ranking. Default to self.model.
top_n : The number of results to return. If None returns all results.
Defaults to self.top_n.
max_chunks_per_doc : The maximum number of chunks derived from a document.
""" # noqa: E501
if len(documents) == 0: # to avoid empty api call
return []
docs = [
doc.page_content if isinstance(doc, Document) else doc for doc in documents
]
model = model or self.model
top_n = top_n if (top_n is None or top_n > 0) else self.top_n
results = self.client.rerank(
query=query,
documents=docs,
model=model,
top_n=top_n,
max_chunks_per_doc=max_chunks_per_doc,
)
if hasattr(results, "results"):
results = getattr(results, "results")
result_dicts = []
for res in results:
result_dicts.append(
{
"index": res.index,
"relevance_score": res.relevance_score,
}
)
return result_dicts
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
compressed = []
for res in self.rerank(documents, query):
doc = documents[res["index"]]
doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
doc_copy.metadata["relevance_score"] = res["relevance_score"]
compressed.append(doc_copy)
return compressed
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = [
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_messages",
"get_buffer_string",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
]
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = [
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_messages",
"get_buffer_string",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
]
|
"""Embedding adapter model."""
import logging
from typing import Any, List, Optional, Type, cast
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_EMBED_BATCH_SIZE
from llama_index.core.utils import infer_torch_device
logger = logging.getLogger(__name__)
class AdapterEmbeddingModel(BaseEmbedding):
"""
Adapter for any embedding model.
This is a wrapper around any embedding model that adds an adapter layer \
on top of it.
This is useful for finetuning an embedding model on a downstream task.
The embedding model can be any model - it does not need to expose gradients.
Args:
base_embed_model (BaseEmbedding): Base embedding model.
adapter_path (str): Path to adapter.
adapter_cls (Optional[Type[Any]]): Adapter class. Defaults to None, in which \
case a linear adapter is used.
transform_query (bool): Whether to transform query embeddings. Defaults to True.
device (Optional[str]): Device to use. Defaults to None.
embed_batch_size (int): Batch size for embedding. Defaults to 10.
callback_manager (Optional[CallbackManager]): Callback manager. \
Defaults to None.
"""
_base_embed_model: BaseEmbedding = PrivateAttr()
_adapter: Any = PrivateAttr()
_transform_query: bool = PrivateAttr()
_device: Optional[str] = PrivateAttr()
_target_device: Any = PrivateAttr()
def __init__(
self,
base_embed_model: BaseEmbedding,
adapter_path: str,
adapter_cls: Optional[Type[Any]] = None,
transform_query: bool = True,
device: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
) -> None:
"""Init params."""
import torch
from llama_index.embeddings.adapter.utils import BaseAdapter, LinearLayer
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model_name=f"Adapter for {base_embed_model.model_name}",
)
if device is None:
device = infer_torch_device()
logger.info(f"Use pytorch device: {device}")
self._target_device = torch.device(device)
self._base_embed_model = base_embed_model
if adapter_cls is None:
adapter_cls = LinearLayer
else:
adapter_cls = cast(Type[BaseAdapter], adapter_cls)
adapter = adapter_cls.load(adapter_path)
self._adapter = cast(BaseAdapter, adapter)
self._adapter.to(self._target_device)
self._transform_query = transform_query
@classmethod
def class_name(cls) -> str:
return "AdapterEmbeddingModel"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
import torch
query_embedding = self._base_embed_model._get_query_embedding(query)
if self._transform_query:
query_embedding_t = torch.tensor(query_embedding).to(self._target_device)
query_embedding_t = self._adapter.forward(query_embedding_t)
query_embedding = query_embedding_t.tolist()
return query_embedding
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
import torch
query_embedding = await self._base_embed_model._aget_query_embedding(query)
if self._transform_query:
query_embedding_t = torch.tensor(query_embedding).to(self._target_device)
query_embedding_t = self._adapter.forward(query_embedding_t)
query_embedding = query_embedding_t.tolist()
return query_embedding
def _get_text_embedding(self, text: str) -> List[float]:
return self._base_embed_model._get_text_embedding(text)
async def _aget_text_embedding(self, text: str) -> List[float]:
return await self._base_embed_model._aget_text_embedding(text)
# Maintain for backwards compatibility
LinearAdapterEmbeddingModel = AdapterEmbeddingModel
|
"""Embedding adapter model."""
import logging
from typing import Any, List, Optional, Type, cast
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_EMBED_BATCH_SIZE
from llama_index.core.utils import infer_torch_device
logger = logging.getLogger(__name__)
class AdapterEmbeddingModel(BaseEmbedding):
"""Adapter for any embedding model.
This is a wrapper around any embedding model that adds an adapter layer \
on top of it.
This is useful for finetuning an embedding model on a downstream task.
The embedding model can be any model - it does not need to expose gradients.
Args:
base_embed_model (BaseEmbedding): Base embedding model.
adapter_path (str): Path to adapter.
adapter_cls (Optional[Type[Any]]): Adapter class. Defaults to None, in which \
case a linear adapter is used.
transform_query (bool): Whether to transform query embeddings. Defaults to True.
device (Optional[str]): Device to use. Defaults to None.
embed_batch_size (int): Batch size for embedding. Defaults to 10.
callback_manager (Optional[CallbackManager]): Callback manager. \
Defaults to None.
"""
_base_embed_model: BaseEmbedding = PrivateAttr()
_adapter: Any = PrivateAttr()
_transform_query: bool = PrivateAttr()
_device: Optional[str] = PrivateAttr()
_target_device: Any = PrivateAttr()
def __init__(
self,
base_embed_model: BaseEmbedding,
adapter_path: str,
adapter_cls: Optional[Type[Any]] = None,
transform_query: bool = True,
device: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
) -> None:
"""Init params."""
import torch
from llama_index.embeddings.adapter.utils import BaseAdapter, LinearLayer
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model_name=f"Adapter for {base_embed_model.model_name}",
)
if device is None:
device = infer_torch_device()
logger.info(f"Use pytorch device: {device}")
self._target_device = torch.device(device)
self._base_embed_model = base_embed_model
if adapter_cls is None:
adapter_cls = LinearLayer
else:
adapter_cls = cast(Type[BaseAdapter], adapter_cls)
adapter = adapter_cls.load(adapter_path)
self._adapter = cast(BaseAdapter, adapter)
self._adapter.to(self._target_device)
self._transform_query = transform_query
@classmethod
def class_name(cls) -> str:
return "AdapterEmbeddingModel"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
import torch
query_embedding = self._base_embed_model._get_query_embedding(query)
if self._transform_query:
query_embedding_t = torch.tensor(query_embedding).to(self._target_device)
query_embedding_t = self._adapter.forward(query_embedding_t)
query_embedding = query_embedding_t.tolist()
return query_embedding
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
import torch
query_embedding = await self._base_embed_model._aget_query_embedding(query)
if self._transform_query:
query_embedding_t = torch.tensor(query_embedding).to(self._target_device)
query_embedding_t = self._adapter.forward(query_embedding_t)
query_embedding = query_embedding_t.tolist()
return query_embedding
def _get_text_embedding(self, text: str) -> List[float]:
return self._base_embed_model._get_text_embedding(text)
async def _aget_text_embedding(self, text: str) -> List[float]:
return await self._base_embed_model._aget_text_embedding(text)
# Maintain for backwards compatibility
LinearAdapterEmbeddingModel = AdapterEmbeddingModel
|
from typing import Dict, Type
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
RECOGNIZED_LLMS: Dict[str, Type[LLM]] = {
MockLLM.class_name(): MockLLM,
}
# Conditionals for llama-cloud support
try:
from llama_index.llms.openai import OpenAI # pants: no-infer-dep
RECOGNIZED_LLMS[OpenAI.class_name()] = OpenAI # pants: no-infer-dep
except ImportError:
pass
try:
from llama_index.llms.azure_openai import AzureOpenAI # pants: no-infer-dep
RECOGNIZED_LLMS[AzureOpenAI.class_name()] = AzureOpenAI # pants: no-infer-dep
except ImportError:
pass
try:
from llama_index.llms.huggingface_api import (
HuggingFaceInferenceAPI,
) # pants: no-infer-dep
RECOGNIZED_LLMS[HuggingFaceInferenceAPI.class_name()] = HuggingFaceInferenceAPI
except ImportError:
pass
def load_llm(data: dict) -> LLM:
"""Load LLM by name."""
if isinstance(data, LLM):
return data
llm_name = data.get("class_name")
if llm_name is None:
raise ValueError("LLM loading requires a class_name")
if llm_name not in RECOGNIZED_LLMS:
raise ValueError(f"Invalid LLM name: {llm_name}")
return RECOGNIZED_LLMS[llm_name].from_dict(data)
|
from typing import Dict, Type
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
RECOGNIZED_LLMS: Dict[str, Type[LLM]] = {
MockLLM.class_name(): MockLLM,
}
# Conditionals for llama-cloud support
try:
from llama_index.llms.openai import OpenAI # pants: no-infer-dep
RECOGNIZED_LLMS[OpenAI.class_name()] = OpenAI # pants: no-infer-dep
except ImportError:
pass
try:
from llama_index.llms.azure_openai import AzureOpenAI # pants: no-infer-dep
RECOGNIZED_LLMS[AzureOpenAI.class_name()] = AzureOpenAI # pants: no-infer-dep
except ImportError:
pass
try:
from llama_index.llms.huggingface_api import (
HuggingFaceInferenceAPI,
) # pants: no-infer-dep
RECOGNIZED_LLMS[HuggingFaceInferenceAPI.class_name()] = HuggingFaceInferenceAPI
except ImportError:
pass
def load_llm(data: dict) -> LLM:
"""Load LLM by name."""
if isinstance(data, LLM):
return data
llm_name = data.get("class_name", None)
if llm_name is None:
raise ValueError("LLM loading requires a class_name")
if llm_name not in RECOGNIZED_LLMS:
raise ValueError(f"Invalid LLM name: {llm_name}")
return RECOGNIZED_LLMS[llm_name].from_dict(data)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from simpleranker import SimpleRanker
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
@pytest.mark.parametrize('ranking', ['min', 'max'])
def test_ranking(
documents_chunk, documents_chunk_chunk, default_traversal_paths, ranking
):
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
default_traversal_paths=default_traversal_paths,
)
if default_traversal_paths == ['r']:
ranking_docs = documents_chunk
else:
ranking_docs = documents_chunk_chunk
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
if ranking == 'min':
assert (
match.scores['cosine'].value
<= doc.matches[i + 1].scores['cosine'].value
)
else:
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
@pytest.mark.parametrize('ranking', ['mean_min', 'mean_max'])
def test_mean_ranking(documents_chunk, ranking):
default_traversal_paths = ['r']
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
default_traversal_paths=default_traversal_paths,
)
ranking_docs = documents_chunk
mean_scores = []
for doc in ranking_docs[0].chunks:
scores = []
for match in doc.matches:
scores.append(match.scores['cosine'].value)
mean_scores.append(sum(scores) / 10)
mean_scores.sort(reverse=ranking == 'mean_max')
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert match.scores['cosine'].value == pytest.approx(mean_scores[i], 1e-5)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from ...simpleranker import SimpleRanker
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
@pytest.mark.parametrize('ranking', ['min', 'max'])
def test_ranking(
documents_chunk, documents_chunk_chunk, default_traversal_paths, ranking
):
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
default_traversal_paths=default_traversal_paths,
)
if default_traversal_paths == ['r']:
ranking_docs = documents_chunk
else:
ranking_docs = documents_chunk_chunk
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
if ranking == 'min':
assert (
match.scores['cosine'].value
<= doc.matches[i + 1].scores['cosine'].value
)
else:
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
@pytest.mark.parametrize('ranking', ['mean_min', 'mean_max'])
def test_mean_ranking(documents_chunk, ranking):
default_traversal_paths = ['r']
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
default_traversal_paths=default_traversal_paths,
)
ranking_docs = documents_chunk
mean_scores = []
for doc in ranking_docs[0].chunks:
scores = []
for match in doc.matches:
scores.append(match.scores['cosine'].value)
mean_scores.append(sum(scores) / 10)
mean_scores.sort(reverse=ranking == 'mean_max')
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert match.scores['cosine'].value == pytest.approx(mean_scores[i], 1e-5)
|
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py']
# yapf:disable
model = dict(
bbox_head=dict(
anchor_generator=dict(
base_sizes=[[(220, 125), (128, 222), (264, 266)],
[(35, 87), (102, 96), (60, 170)],
[(10, 15), (24, 36), (72, 42)]])))
# yapf:enable
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py']
# yapf:disable
model = dict(
bbox_head=dict(
anchor_generator=dict(
base_sizes=[[(220, 125), (128, 222), (264, 266)],
[(35, 87), (102, 96), (60, 170)],
[(10, 15), (24, 36), (72, 42)]])))
# yapf:enable
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
from docarray import Document, DocumentArray
import numpy as np
import pytest
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize('columns', [[('price', 'int')], {'price': 'int'}])
def test_add_ignore_existing_doc_id(start_storage, columns):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': columns,
'distance': 'l2_norm',
'index_name': 'test_add_ignore_existing_doc_id',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
]
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[2, 2, 2]),
Document(id='r6', embedding=[4, 4, 4]),
]
)
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc) == len(elastic_doc[:, 'embedding'])
assert len(elastic_doc) == indexed_offset_count
assert len(elastic_doc[:, 'embedding']) == 7
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize('columns', [[('price', 'int')], {'price': 'int'}])
def test_add_skip_wrong_data_type_and_fix_offset(start_storage, columns):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': columns,
'index_name': 'test_add_skip_wrong_data_type_and_fix_offset',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=1000),
Document(id='1', price=20000),
Document(id='2', price=103000),
]
)
with pytest.raises(IndexError):
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=10000),
Document(id='1', price=20000),
Document(id='3', price=30000),
Document(id='4', price=100000000000), # overflow int32
Document(id='5', price=2000),
Document(id='6', price=100000000000), # overflow int32
Document(id='7', price=30000),
]
)
expected_ids = ['0', '1', '2', '3', '5', '7']
assert len(elastic_doc) == 6
assert len(elastic_doc[:, 'id']) == 6
assert elastic_doc[:, 'id'] == expected_ids
assert elastic_doc._offset2ids.ids == expected_ids
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize("assert_customization_propagation", [True, False])
@pytest.mark.parametrize(
'columns',
[
[
('is_true', 'bool'),
('test_long', 'long'),
('test_double', 'double'),
],
{'is_true': 'bool', 'test_long': 'long', 'test_double': 'double'},
],
)
def test_succes_add_bulk_custom_params(
monkeypatch, start_storage, assert_customization_propagation, columns
):
bulk_custom_params = {
'thread_count': 4,
'chunk_size': 100,
'max_chunk_bytes': 104857600,
'queue_size': 4,
}
nrof_docs = 100
def _mock_send_requests(requests, **kwargs):
# Currently only self._send_requests from extend method which
# receive customization
if (
not requests[0]['_index'].startswith('offset2id__')
and requests[0]['_op_type'] == 'index'
):
assert kwargs == bulk_custom_params
return [{'index': {'_id': f'r{i}'}} for i in range(nrof_docs)]
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': columns,
'distance': 'l2_norm',
'index_name': 'test_succes_add_bulk_custom_params',
},
)
if assert_customization_propagation:
monkeypatch.setattr(elastic_doc, '_send_requests', _mock_send_requests)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
],
**bulk_custom_params,
)
|
from docarray import Document, DocumentArray
import numpy as np
import pytest
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_add_ignore_existing_doc_id(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_add_ignore_existing_doc_id',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
]
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[2, 2, 2]),
Document(id='r6', embedding=[4, 4, 4]),
]
)
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
assert len(elastic_doc) == len(elastic_doc[:, 'embedding'])
assert len(elastic_doc) == indexed_offset_count
assert len(elastic_doc[:, 'embedding']) == 7
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_add_skip_wrong_data_type_and_fix_offset(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'index_name': 'test_add_skip_wrong_data_type_and_fix_offset',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=1000),
Document(id='1', price=20000),
Document(id='2', price=103000),
]
)
with pytest.raises(IndexError):
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=10000),
Document(id='1', price=20000),
Document(id='3', price=30000),
Document(id='4', price=100000000000), # overflow int32
Document(id='5', price=2000),
Document(id='6', price=100000000000), # overflow int32
Document(id='7', price=30000),
]
)
expected_ids = ['0', '1', '2', '3', '5', '7']
assert len(elastic_doc) == 6
assert len(elastic_doc[:, 'id']) == 6
assert elastic_doc[:, 'id'] == expected_ids
assert elastic_doc._offset2ids.ids == expected_ids
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize("assert_customization_propagation", [True, False])
def test_succes_add_bulk_custom_params(
monkeypatch, start_storage, assert_customization_propagation
):
bulk_custom_params = {
'thread_count': 4,
'chunk_size': 100,
'max_chunk_bytes': 104857600,
'queue_size': 4,
}
nrof_docs = 100
def _mock_send_requests(requests, **kwargs):
# Currently only self._send_requests from extend method which
# receive customization
if (
not requests[0]['_index'].startswith('offset2id__')
and requests[0]['_op_type'] == 'index'
):
assert kwargs == bulk_custom_params
return [{'index': {'_id': f'r{i}'}} for i in range(nrof_docs)]
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [
('is_true', 'bool'),
('test_long', 'long'),
('test_double', 'double'),
],
'distance': 'l2_norm',
'index_name': 'test_succes_add_bulk_custom_params',
},
)
if assert_customization_propagation:
monkeypatch.setattr(elastic_doc, '_send_requests', _mock_send_requests)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
],
**bulk_custom_params,
)
|
from typing import Optional, List
from docarray.base_document.document import BaseDocument
def test_base_document_init():
doc = BaseDocument()
assert doc.id is not None
def test_update():
class MyDocument(BaseDocument):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
|
from docarray.base_document.document import BaseDocument
def test_base_document_init():
doc = BaseDocument()
assert doc.id is not None
|
import os
from typing import Any, Callable, List, Optional, Tuple
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__len__`` method.
Args:
root (string): Root directory of dataset.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: str,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, str):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
|
import os
from typing import Any, Callable, List, Optional, Tuple
import torch
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__len__`` method.
Args:
root (string): Root directory of dataset.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: str,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, str):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(data: Dict = {}, root_node: Optional[LookupNode] = None):
if isinstance(data, dict):
for key, value in data.items():
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[key])
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet, please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[op])
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}__{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Dict = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: 'Document'):
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: 'Document'):
return self.evaluate(doc)
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray import Document
from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(data: Dict = {}, root_node: Optional[LookupNode] = None):
if isinstance(data, dict):
for key, value in data.items():
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[key])
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet, please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[op])
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}__{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Dict = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: 'Document'):
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: 'Document'):
return self.evaluate(doc)
|
from collections.abc import Sequence as ABCSequence
from typing import Any
BASE_TYPES = (int, str, bool, bytes, float)
def _is_otel_supported_type(obj: Any) -> bool:
# If it's one of the base types
if isinstance(obj, BASE_TYPES):
return True
# If it's a sequence (but not a string or bytes, which are sequences too)
if isinstance(obj, ABCSequence) and not isinstance(obj, (str, bytes)):
return all(isinstance(item, BASE_TYPES) for item in obj)
return False
def filter_model_fields(model_dict: dict) -> dict:
newdict = {}
for field in model_dict:
if _is_otel_supported_type(model_dict[field]):
newdict.update({field: model_dict[field]})
return newdict
|
from collections.abc import Sequence as ABCSequence
from typing import Any
BASE_TYPES = (int, str, bool, bytes, float)
def _is_otel_supported_type(obj: Any) -> bool:
# If it's one of the base types
if isinstance(obj, BASE_TYPES):
return True
# If it's a sequence (but not a string or bytes, which are sequences too)
if isinstance(obj, ABCSequence) and not isinstance(obj, (str, bytes)):
return all(isinstance(item, BASE_TYPES) for item in obj)
return False
def filter_model_fields(model_dict: dict) -> dict:
newdict = {}
for field in model_dict:
if _is_otel_supported_type(model_dict[field]):
newdict.update({field: model_dict[field]})
return newdict
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_sana"] = ["SanaPipeline"]
_import_structure["pipeline_sana_sprint"] = ["SanaSprintPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_sana import SanaPipeline
from .pipeline_sana_sprint import SanaSprintPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_sana"] = ["SanaPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_sana import SanaPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionScheduler
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
DatabaseManager(),
ExecutionScheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
from backend.app import run_processes
from backend.executor import ExecutionScheduler
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
ExecutionScheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
class WebSocketGatewayRuntime(GatewayRuntime):
"""Runtime for Websocket interface."""
async def async_setup(self):
"""
The async method setup the runtime.
Setup the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
args=self.args,
logger=self.logger,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of ther server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
class WebSocketGatewayRuntime(GatewayRuntime):
"""Runtime for Websocket interface."""
async def async_setup(self):
"""
The async method setup the runtime.
Setup the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._set_topology_graph()
self._set_connection_pool()
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
self.args,
topology_graph=self._topology_graph,
connection_pool=self._connection_pool,
logger=self.logger,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of ther server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
await self._connection_pool.close()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .early_stopping_hook import EarlyStoppingHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .profiler_hook import NPUProfilerHook, ProfilerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
from .test_time_aug_hook import PrepareTTAHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook', 'ProfilerHook',
'PrepareTTAHook', 'NPUProfilerHook', 'EarlyStoppingHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .profiler_hook import NPUProfilerHook, ProfilerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
from .test_time_aug_hook import PrepareTTAHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook', 'ProfilerHook',
'NPUProfilerHook', 'PrepareTTAHook'
]
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel,
convert_sync_batchnorm, is_model_wrapper,
revert_sync_batchnorm)
from mmengine.registry import MODEL_WRAPPERS, Registry
from mmengine.utils import is_installed
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.SyncBatchNorm(8))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_convert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.BatchNorm2d(8))
x = torch.randn(1, 3, 10, 10)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
# Test convert to mmcv SyncBatchNorm
if is_installed('mmcv'):
# MMCV SyncBatchNorm is only supported on distributed training.
with pytest.raises((RuntimeError, AssertionError)):
convert_sync_batchnorm(conv, implementation='mmcv')
# Test convert to Pytorch SyncBatchNorm
# Expect a ValueError prompting that SyncBN is not supported on CPU
converted_conv = convert_sync_batchnorm(conv)
assert isinstance(converted_conv[1], torch.nn.SyncBatchNorm)
with pytest.raises(ValueError):
converted_conv(x)
def test_is_model_wrapper():
# Test basic module wrapper.
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29510'
os.environ['RANK'] = str(0)
init_process_group(backend='gloo', rank=0, world_size=1)
model = nn.Linear(1, 1)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` can check model wrapper registered in custom
# registry.
CHILD_REGISTRY = Registry('test_is_model_wrapper', parent=MODEL_WRAPPERS)
class CustomModelWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
pass
CHILD_REGISTRY.register_module(module=CustomModelWrapper, force=True)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel, CustomModelWrapper
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` will not check model wrapper in parent
# registry from a child registry.
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert not is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
wrapper_model = CustomModelWrapper(model)
assert is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
destroy_process_group()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel,
convert_sync_batchnorm, is_model_wrapper,
revert_sync_batchnorm)
from mmengine.registry import MODEL_WRAPPERS, Registry
from mmengine.utils import is_installed
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.SyncBatchNorm(8))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_convert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.BatchNorm2d(8))
x = torch.randn(1, 3, 10, 10)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
# Test convert to mmcv SyncBatchNorm
if is_installed('mmcv'):
# MMCV SyncBatchNorm is only supported on distributed training.
with pytest.raises((RuntimeError, AssertionError)):
convert_sync_batchnorm(conv, implementation='mmcv')
# Test convert to Pytorch SyncBatchNorm
# Expect a ValueError prompting that SyncBN is not supported on CPU
converted_conv = convert_sync_batchnorm(conv)
assert isinstance(converted_conv[1], torch.nn.SyncBatchNorm)
with pytest.raises(ValueError):
converted_conv(x)
def test_is_model_wrapper():
# Test basic module wrapper.
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29510'
os.environ['RANK'] = str(0)
init_process_group(backend='gloo', rank=0, world_size=1)
model = nn.Linear(1, 1)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` can check model wrapper registered in custom
# registry.
CHILD_REGISTRY = Registry('test_is_model_wrapper', parent=MODEL_WRAPPERS)
class CustomModelWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
pass
CHILD_REGISTRY.register_module(module=CustomModelWrapper)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel, CustomModelWrapper
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` will not check model wrapper in parent
# registry from a child registry.
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert not is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
wrapper_model = CustomModelWrapper(model)
assert is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
destroy_process_group()
|
# Copyright 2024 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boilerplate utilities for kernel testing."""
from typing import Optional
import numpy as np
from xla.codegen.testlib import _extension
from xla.python import xla_extension
def create_scalar_literal(value, dtype: np.dtype) -> xla_extension.Literal:
shape = xla_extension.Shape.scalar_shape(dtype)
literal = xla_extension.Literal(shape)
np.copyto(np.asarray(literal), value)
return literal
def create_literal_from_np(
array: np.ndarray, layout: Optional[list[int]] = None
) -> xla_extension.Literal:
if np.ndim(array) == 0:
return create_scalar_literal(array.item(), array.dtype)
shape = xla_extension.Shape.array_shape(array.dtype, array.shape, layout)
literal = xla_extension.Literal(shape)
np.copyto(np.asarray(literal), array)
return literal
# Intentionally rexport-ed to be avalable in the public API.
opcode_arity = _extension.opcode_arity
|
# Copyright 2024 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boilerplate utilities for kernel testing."""
import numpy as np
from xla.codegen.testlib import _extension
from xla.python import xla_extension
def create_scalar_literal(value, dtype: np.dtype) -> xla_extension.Literal:
shape = xla_extension.Shape.scalar_shape(dtype)
literal = xla_extension.Literal(shape)
np.copyto(np.asarray(literal), value)
return literal
def create_literal_from_np(array: np.ndarray) -> xla_extension.Literal:
if np.ndim(array) == 0:
return create_scalar_literal(array.item(), array.dtype)
shape = xla_extension.Shape.array_shape(array.dtype, array.shape)
literal = xla_extension.Literal(shape)
np.copyto(np.asarray(literal), array)
return literal
# Intentionally rexport-ed to be avalable in the public API.
opcode_arity = _extension.opcode_arity
|
import os
import torch
import torchaudio.prototype.transforms as T
import torchaudio.transforms as transforms
from torchaudio_unittest.common_utils import TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
def assert_batch_consistency(self, transform, batch, *args, atol=1e-8, rtol=1e-5, seed=42, **kwargs):
n = batch.size(0)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = batch.clone()
items_result = torch.stack([transform(items_input[i], *args, **kwargs) for i in range(n)])
# Batch the input and run
torch.random.manual_seed(seed)
batch_input = batch.clone()
batch_result = transform(batch_input, *args, **kwargs)
self.assertEqual(items_input, batch_input, rtol=rtol, atol=atol)
self.assertEqual(items_result, batch_result, rtol=rtol, atol=atol)
def test_batch_BarkScale(self):
specgram = torch.randn(3, 2, 201, 256)
atol = 1e-6 if os.name == "nt" else 1e-8
transform = T.BarkScale()
self.assert_batch_consistency(transform, specgram, atol=atol)
def test_batch_InverseBarkScale(self):
n_barks = 32
n_stft = 5
bark_spec = torch.randn(3, 2, n_barks, 32) ** 2
transform = T.InverseBarkScale(n_stft, n_barks)
# Because InverseBarkScale runs SGD on randomly initialized values so they do not yield
# exactly same result. For this reason, tolerance is very relaxed here.
self.assert_batch_consistency(transform, bark_spec, atol=1.0, rtol=1e-5)
|
import os
import torch
import torchaudio.prototype.transforms as T
import torchaudio.transforms as transforms
from torchaudio_unittest.common_utils import TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
def assert_batch_consistency(self, transform, batch, *args, atol=1e-8, rtol=1e-5, seed=42, **kwargs):
n = batch.size(0)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = batch.clone()
items_result = torch.stack([transform(items_input[i], *args, **kwargs) for i in range(n)])
# Batch the input and run
torch.random.manual_seed(seed)
batch_input = batch.clone()
batch_result = transform(batch_input, *args, **kwargs)
self.assertEqual(items_input, batch_input, rtol=rtol, atol=atol)
self.assertEqual(items_result, batch_result, rtol=rtol, atol=atol)
def test_batch_BarkScale(self):
specgram = torch.randn(3, 2, 201, 256)
atol = 1e-6 if os.name == "nt" else 1e-8
transform = T.BarkScale()
self.assert_batch_consistency(transform, specgram, atol=atol)
def test_batch_InverseBarkScale(self):
n_barks = 32
n_stft = 5
bark_spec = torch.randn(3, 2, n_barks, 32) ** 2
transform = transforms.InverseMelScale(n_stft, n_barks)
# Because InverseBarkScale runs SGD on randomly initialized values so they do not yield
# exactly same result. For this reason, tolerance is very relaxed here.
self.assert_batch_consistency(transform, bark_spec, atol=1.0, rtol=1e-5)
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseRerankingEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms-marco-dev-small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.46
MRR@10: 54.18
NDCG@10: 65.10
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms-marco-dev-small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6510
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseRerankingEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
|
from pathlib import Path
from typing import Union, Optional, Callable, TYPE_CHECKING, Generator
if TYPE_CHECKING: # pragma: no cover
from docarray import DocumentArray
from docarray.typing import T
from multiprocessing.pool import ThreadPool, Pool
class DataLoaderMixin:
@classmethod
def dataloader(
cls,
path: Union[str, Path],
func: Callable[['DocumentArray'], 'T'],
batch_size: int,
protocol: str = 'protobuf',
compress: Optional[str] = None,
backend: str = 'thread',
num_worker: Optional[int] = None,
pool: Optional[Union['Pool', 'ThreadPool']] = None,
show_progress: bool = False,
) -> Generator['DocumentArray', None, None]:
"""Load array elements, batches and maps them with a function in parallel, finally yield the batch in DocumentArray
:param path: Path or filename where the data is stored.
:param func: a function that takes :class:`DocumentArray` as input and outputs anything. You can either modify elements
in-place (only with `thread` backend) or work later on return elements.
:param batch_size: Size of each generated batch (except the last one, which might be smaller)
:param protocol: protocol to use
:param compress: compress algorithm to use
:param backend: if to use multi-`process` or multi-`thread` as the parallelization backend. In general, if your
``func`` is IO-bound then perhaps `thread` is good enough. If your ``func`` is CPU-bound then you may use `process`.
In practice, you should try yourselves to figure out the best value. However, if you wish to modify the elements
in-place, regardless of IO/CPU-bound, you should always use `thread` backend.
.. warning::
When using `process` backend, you should not expect ``func`` modify elements in-place. This is because
the multiprocessing backing pass the variable via pickle and work in another process. The passed object
and the original object do **not** share the same memory.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
:param pool: use an existing/external pool. If given, `backend` is ignored and you will be responsible for closing the pool.
:param show_progress: if set, show a progressbar
:return:
"""
from docarray.array.mixins.dataloader.helper import DocumentArrayLoader
for da in DocumentArrayLoader(
path, protocol=protocol, compress=compress, show_progress=False
).map_batch(
func,
batch_size=batch_size,
backend=backend,
num_worker=num_worker,
pool=pool,
show_progress=show_progress,
):
yield da
|
from pathlib import Path
from typing import Union, Optional, Callable, TYPE_CHECKING, Generator
if TYPE_CHECKING:
from docarray import DocumentArray
from docarray.typing import T
from multiprocessing.pool import ThreadPool, Pool
class DataLoaderMixin:
@classmethod
def dataloader(
cls,
path: Union[str, Path],
func: Callable[['DocumentArray'], 'T'],
batch_size: int,
protocol: str = 'protobuf',
compress: Optional[str] = None,
backend: str = 'thread',
num_worker: Optional[int] = None,
pool: Optional[Union['Pool', 'ThreadPool']] = None,
show_progress: bool = False,
) -> Generator['DocumentArray', None, None]:
"""Load array elements, batches and maps them with a function in parallel, finally yield the batch in DocumentArray
:param path: Path or filename where the data is stored.
:param func: a function that takes :class:`DocumentArray` as input and outputs anything. You can either modify elements
in-place (only with `thread` backend) or work later on return elements.
:param batch_size: Size of each generated batch (except the last one, which might be smaller)
:param protocol: protocol to use
:param compress: compress algorithm to use
:param backend: if to use multi-`process` or multi-`thread` as the parallelization backend. In general, if your
``func`` is IO-bound then perhaps `thread` is good enough. If your ``func`` is CPU-bound then you may use `process`.
In practice, you should try yourselves to figure out the best value. However, if you wish to modify the elements
in-place, regardless of IO/CPU-bound, you should always use `thread` backend.
.. warning::
When using `process` backend, you should not expect ``func`` modify elements in-place. This is because
the multiprocessing backing pass the variable via pickle and work in another process. The passed object
and the original object do **not** share the same memory.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
:param pool: use an existing/external pool. If given, `backend` is ignored and you will be responsible for closing the pool.
:param show_progress: if set, show a progressbar
:return:
"""
from docarray.array.mixins.dataloader.helper import DocumentArrayLoader
for da in DocumentArrayLoader(
path, protocol=protocol, compress=compress, show_progress=False
).map_batch(
func,
batch_size=batch_size,
backend=backend,
num_worker=num_worker,
pool=pool,
show_progress=show_progress,
):
yield da
|
# Copyright (c) OpenMMLab. All rights reserved.
from .inference import (async_inference_detector, inference_detector,
init_detector)
__all__ = [
'init_detector',
'async_inference_detector',
'inference_detector',
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .inference import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
from .test import multi_gpu_test, single_gpu_test
from .train import (get_root_logger, init_random_seed, set_random_seed,
train_detector)
__all__ = [
'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',
'async_inference_detector', 'inference_detector', 'show_result_pyplot',
'multi_gpu_test', 'single_gpu_test', 'init_random_seed'
]
|
_base_ = [
'mmcls::_base_/datasets/imagenet_bs256_rsb_a12.py',
'mmcls::_base_/schedules/imagenet_bs2048_rsb.py',
'mmcls::_base_/default_runtime.py'
]
model = dict(
type='ImageClassifier',
backbone=dict(
type='mmdet.CSPNeXt',
arch='P5',
out_indices=(4, ),
expand_ratio=0.5,
deepen_factor=0.33,
widen_factor=0.5,
channel_attention=True,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='mmdet.SiLU')),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
mode='original',
loss_weight=1.0),
topk=(1, 5)),
train_cfg=dict(augments=[
dict(type='Mixup', alpha=0.2),
dict(type='CutMix', alpha=1.0)
]))
# dataset settings
train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True))
# schedule settings
optim_wrapper = dict(
optimizer=dict(weight_decay=0.01),
paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
)
param_scheduler = [
# warm up learning rate scheduler
dict(
type='LinearLR',
start_factor=0.0001,
by_epoch=True,
begin=0,
end=5,
# update by iter
convert_to_iter_based=True),
# main learning rate scheduler
dict(
type='CosineAnnealingLR',
T_max=595,
eta_min=1.0e-6,
by_epoch=True,
begin=5,
end=600)
]
train_cfg = dict(by_epoch=True, max_epochs=600)
|
_base_ = [
'mmcls::_base_/datasets/imagenet_bs256_rsb_a12.py',
'mmcls::_base_/schedules/imagenet_bs2048_rsb.py',
'mmcls::_base_/default_runtime.py'
]
custom_imports = dict(imports=['mmdet.models'], allow_failed_imports=False)
model = dict(
type='ImageClassifier',
backbone=dict(
type='mmdet.CSPNeXt',
arch='P5',
out_indices=(4, ),
expand_ratio=0.5,
deepen_factor=0.33,
widen_factor=0.5,
channel_attention=True,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='mmdet.SiLU')),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
mode='original',
loss_weight=1.0),
topk=(1, 5)),
train_cfg=dict(augments=[
dict(type='Mixup', alpha=0.2),
dict(type='CutMix', alpha=1.0)
]))
# dataset settings
train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True))
# schedule settings
optim_wrapper = dict(
optimizer=dict(weight_decay=0.01),
paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
)
param_scheduler = [
# warm up learning rate scheduler
dict(
type='LinearLR',
start_factor=0.0001,
by_epoch=True,
begin=0,
end=5,
# update by iter
convert_to_iter_based=True),
# main learning rate scheduler
dict(
type='CosineAnnealingLR',
T_max=595,
eta_min=1.0e-6,
by_epoch=True,
begin=5,
end=600)
]
train_cfg = dict(by_epoch=True, max_epochs=600)
|
"""Test Aleph Alpha API wrapper."""
from langchain_community.llms.aleph_alpha import AlephAlpha
def test_aleph_alpha_call() -> None:
"""Test valid call to cohere."""
llm = AlephAlpha(maximum_tokens=10)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
"""Test Aleph Alpha API wrapper."""
from langchain_community.llms.aleph_alpha import AlephAlpha
def test_aleph_alpha_call() -> None:
"""Test valid call to cohere."""
llm = AlephAlpha(maximum_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
from collections import namedtuple
from typing import TYPE_CHECKING, Dict, NamedTuple, Optional
from urllib.parse import urlparse
if TYPE_CHECKING:
from ... import DocumentArray
_ParsedHost = namedtuple('ParsedHost', 'on host port version scheme')
def _parse_host(host: str) -> NamedTuple:
"""Parse a host string into namedtuple object.
A parsed host's components are `on`, `host`, `port`, `version`, `scheme`.
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
"""
r = urlparse(host)
on = r.path or '/'
host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))._replace(path='').geturl()
)
port = r.port or None
version = None
scheme = r.scheme
splited_path = list(filter(None, r.path.split('/')))
if len(splited_path) == 2:
# path includes version and endpoint
version = splited_path[0]
host = host + '/' + version
on = '/' + splited_path[1]
return _ParsedHost(on=on, host=host, port=port, version=version, scheme=scheme)
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
**kwargs,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+docker://Hello/v0.0.1/endpoint`
- `jinahub+docker://Hello/latest/endpoint`
- `jinahub+sandbox://Hello/endpoint`
:param show_progress: if to show a progressbar
:param batch_size: number of Document on each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
parsed_host = _parse_host(host)
batch_size = batch_size or len(self)
scheme = parsed_host.scheme
host = parsed_host.host
if scheme in ('grpcs', 'https', 'wss'):
scheme = scheme[:-1]
if scheme == 'ws':
scheme = 'websocket' # temp fix for the core
if scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=host, **kwargs)
with f:
return f.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
elif scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if parsed_host.port:
host += f':{parsed_host.port}'
c = Client(host=host)
return c.post(
parsed_host.on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
else:
raise ValueError(f'unsupported scheme: {scheme}')
|
from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
**kwargs,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+sandbox://Hello/endpoint`
:param show_progress: if to show a progressbar
:param batch_size: number of Document on each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
from urllib.parse import urlparse
r = urlparse(host)
_on = r.path or '/'
_port = r.port or None
standardized_host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))
._replace(path='')
.geturl()
)
batch_size = batch_size or len(self)
_scheme = r.scheme
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=standardized_host)
with f:
return f.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
elif _scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if _port:
standardized_host += f':{_port}'
c = Client(host=standardized_host)
return c.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
else:
raise ValueError(f'unsupported scheme: {r.scheme}')
|
import logging
import os
import zlib
from contextlib import asynccontextmanager
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn_retry
load_dotenv()
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA
def add_param(url: str, key: str, value: str) -> str:
p = urlparse(url)
qs = dict(parse_qsl(p.query))
qs[key] = value
return urlunparse(p._replace(query=urlencode(qs)))
DATABASE_URL = os.getenv("DATABASE_URL", "postgresql://localhost:5432")
CONN_LIMIT = os.getenv("DB_CONNECTION_LIMIT")
if CONN_LIMIT:
DATABASE_URL = add_param(DATABASE_URL, "connection_limit", CONN_LIMIT)
CONN_TIMEOUT = os.getenv("DB_CONNECT_TIMEOUT")
if CONN_TIMEOUT:
DATABASE_URL = add_param(DATABASE_URL, "connect_timeout", CONN_TIMEOUT)
POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT")
if POOL_TIMEOUT:
DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT)
HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None
prisma = Prisma(
auto_register=True,
http={"timeout": HTTP_TIMEOUT},
datasource={"url": DATABASE_URL},
)
logger = logging.getLogger(__name__)
@conn_retry("Prisma", "Acquiring connection")
async def connect():
if prisma.is_connected():
return
await prisma.connect()
if not prisma.is_connected():
raise ConnectionError("Failed to connect to Prisma.")
# Connection acquired from a pool like Supabase somehow still possibly allows
# the db client obtains a connection but still reject query connection afterward.
try:
await prisma.execute_raw("SELECT 1")
except Exception as e:
raise ConnectionError("Failed to connect to Prisma.") from e
@conn_retry("Prisma", "Releasing connection")
async def disconnect():
if not prisma.is_connected():
return
await prisma.disconnect()
if prisma.is_connected():
raise ConnectionError("Failed to disconnect from Prisma.")
@asynccontextmanager
async def transaction():
async with prisma.tx() as tx:
yield tx
@asynccontextmanager
async def locked_transaction(key: str):
lock_key = zlib.crc32(key.encode("utf-8"))
async with transaction() as tx:
await tx.execute_raw(f"SELECT pg_advisory_xact_lock({lock_key})")
yield tx
class BaseDbModel(BaseModel):
id: str = Field(default_factory=lambda: str(uuid4()))
@field_validator("id", mode="before")
def set_model_id(cls, id: str) -> str:
# In case an empty ID is submitted
return id or str(uuid4())
|
import logging
import os
import zlib
from contextlib import asynccontextmanager
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn_retry
load_dotenv()
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA
def add_param(url: str, key: str, value: str) -> str:
p = urlparse(url)
qs = dict(parse_qsl(p.query))
qs[key] = value
return urlunparse(p._replace(query=urlencode(qs)))
DATABASE_URL = os.getenv("DATABASE_URL")
if not DATABASE_URL:
raise ValueError("DATABASE_URL is not set.")
CONN_LIMIT = os.getenv("DB_CONNECTION_LIMIT")
if CONN_LIMIT:
DATABASE_URL = add_param(DATABASE_URL, "connection_limit", CONN_LIMIT)
CONN_TIMEOUT = os.getenv("DB_CONNECT_TIMEOUT")
if CONN_TIMEOUT:
DATABASE_URL = add_param(DATABASE_URL, "connect_timeout", CONN_TIMEOUT)
POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT")
if POOL_TIMEOUT:
DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT)
HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None
prisma = Prisma(
auto_register=True,
http={"timeout": HTTP_TIMEOUT},
datasource={"url": DATABASE_URL},
)
logger = logging.getLogger(__name__)
@conn_retry("Prisma", "Acquiring connection")
async def connect():
if prisma.is_connected():
return
await prisma.connect()
if not prisma.is_connected():
raise ConnectionError("Failed to connect to Prisma.")
# Connection acquired from a pool like Supabase somehow still possibly allows
# the db client obtains a connection but still reject query connection afterward.
try:
await prisma.execute_raw("SELECT 1")
except Exception as e:
raise ConnectionError("Failed to connect to Prisma.") from e
@conn_retry("Prisma", "Releasing connection")
async def disconnect():
if not prisma.is_connected():
return
await prisma.disconnect()
if prisma.is_connected():
raise ConnectionError("Failed to disconnect from Prisma.")
@asynccontextmanager
async def transaction():
async with prisma.tx() as tx:
yield tx
@asynccontextmanager
async def locked_transaction(key: str):
lock_key = zlib.crc32(key.encode("utf-8"))
async with transaction() as tx:
await tx.execute_raw(f"SELECT pg_advisory_xact_lock({lock_key})")
yield tx
class BaseDbModel(BaseModel):
id: str = Field(default_factory=lambda: str(uuid4()))
@field_validator("id", mode="before")
def set_model_id(cls, id: str) -> str:
# In case an empty ID is submitted
return id or str(uuid4())
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
__all__ = [
"CONVTASNET_BASE_LIBRI2MIX",
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"SourceSeparationBundle",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
]
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB_PLUS, SourceSeparationBundle
__all__ = [
"CONVTASNET_BASE_LIBRI2MIX",
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"SourceSeparationBundle",
"HDEMUCS_HIGH_MUSDB_PLUS",
]
|
from unittest import TestCase, mock
import boto3
from llama_index.core.postprocessor.types import (
BaseNodePostprocessor,
NodeWithScore,
QueryBundle,
)
from llama_index.core.schema import TextNode
from llama_index.postprocessor.bedrock_rerank import BedrockRerank
class TestBedrockRerank(TestCase):
def test_class(self):
names_of_base_classes = [b.__name__ for b in BedrockRerank.__mro__]
self.assertIn(BaseNodePostprocessor.__name__, names_of_base_classes)
def test_bedrock_rerank(self):
exp_rerank_response = {
"results": [
{
"index": 2,
"relevanceScore": 0.9,
},
{
"index": 3,
"relevanceScore": 0.8,
},
]
}
input_nodes = [
NodeWithScore(node=TextNode(id_="1", text="first 1")),
NodeWithScore(node=TextNode(id_="2", text="first 2")),
NodeWithScore(node=TextNode(id_="3", text="last 1")),
NodeWithScore(node=TextNode(id_="4", text="last 2")),
]
expected_nodes = [
NodeWithScore(node=TextNode(id_="3", text="last 1"), score=0.9),
NodeWithScore(node=TextNode(id_="4", text="last 2"), score=0.8),
]
bedrock_client = boto3.client("bedrock-agent-runtime", region_name="us-west-2")
reranker = BedrockRerank(client=bedrock_client, num_results=2)
with mock.patch.object(
bedrock_client, "rerank", return_value=exp_rerank_response
):
query_bundle = QueryBundle(query_str="last")
actual_nodes = reranker.postprocess_nodes(
input_nodes, query_bundle=query_bundle
)
self.assertEqual(len(actual_nodes), len(expected_nodes))
for actual_node_with_score, expected_node_with_score in zip(
actual_nodes, expected_nodes
):
self.assertEqual(
actual_node_with_score.node.get_content(),
expected_node_with_score.node.get_content(),
)
self.assertAlmostEqual(
actual_node_with_score.score, expected_node_with_score.score
)
|
from unittest import TestCase, mock
import boto3
from llama_index.core.postprocessor.types import (
BaseNodePostprocessor,
NodeWithScore,
QueryBundle,
)
from llama_index.core.schema import TextNode
from llama_index.postprocessor.bedrock_rerank import AWSBedrockRerank
class TestAWSBedrockRerank(TestCase):
def test_class(self):
names_of_base_classes = [b.__name__ for b in AWSBedrockRerank.__mro__]
self.assertIn(BaseNodePostprocessor.__name__, names_of_base_classes)
def test_bedrock_rerank(self):
exp_rerank_response = {
"results": [
{
"index": 2,
"relevanceScore": 0.9,
},
{
"index": 3,
"relevanceScore": 0.8,
},
]
}
input_nodes = [
NodeWithScore(node=TextNode(id_="1", text="first 1")),
NodeWithScore(node=TextNode(id_="2", text="first 2")),
NodeWithScore(node=TextNode(id_="3", text="last 1")),
NodeWithScore(node=TextNode(id_="4", text="last 2")),
]
expected_nodes = [
NodeWithScore(node=TextNode(id_="3", text="last 1"), score=0.9),
NodeWithScore(node=TextNode(id_="4", text="last 2"), score=0.8),
]
bedrock_client = boto3.client("bedrock-agent-runtime", region_name="us-west-2")
reranker = AWSBedrockRerank(client=bedrock_client, num_results=2)
with mock.patch.object(
bedrock_client, "rerank", return_value=exp_rerank_response
):
query_bundle = QueryBundle(query_str="last")
actual_nodes = reranker.postprocess_nodes(
input_nodes, query_bundle=query_bundle
)
self.assertEqual(len(actual_nodes), len(expected_nodes))
for actual_node_with_score, expected_node_with_score in zip(
actual_nodes, expected_nodes
):
self.assertEqual(
actual_node_with_score.node.get_content(),
expected_node_with_score.node.get_content(),
)
self.assertAlmostEqual(
actual_node_with_score.score, expected_node_with_score.score
)
|
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
import safetensors
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class TextToImageLCM(ExamplesTestsAccelerate):
def test_text_to_image_lcm_lora_sdxl(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
def test_text_to_image_lcm_lora_sdxl_checkpointing(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 7
--checkpointing_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4", "checkpoint-6"},
)
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 9
--checkpointing_steps 2
--resume_from_checkpoint latest
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
import safetensors
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class TextToImageLCM(ExamplesTestsAccelerate):
def test_text_to_image_lcm_lora_sdxl(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
def test_text_to_image_lcm_lora_sdxl_checkpointing(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 7
--checkpointing_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4", "checkpoint-6"},
)
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 9
--checkpointing_steps 2
--resume_from_checkpoint latest
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
)
|
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import time
import pytest
from typing import List
from llama_index.core.schema import Document, TextNode
from llama_index.core.node_parser import SentenceSplitter
from redis import Redis
import docker
docker_client = docker.from_env()
docker_client.ping()
container = docker_client.containers.run(
"redis/redis-stack:latest",
detach=True,
name="redis",
ports={"6379/tcp": 6379, "8001/tcp": 8001},
)
# wait for redis to be ready
time.sleep(2)
@pytest.fixture(scope="session", autouse=True)
def docker_setup():
yield container
container.stop()
container.remove()
@pytest.fixture()
def dummy_embedding() -> List:
return [0] * 1536
@pytest.fixture()
def turtle_test() -> dict:
return {
"text": "something about turtles",
"metadata": {"animal": "turtle"},
"question": "turtle stuff",
"doc_id": "1234",
}
@pytest.fixture()
def documents(turtle_test, dummy_embedding) -> List[Document]:
"""
List of documents represents data to be embedded in the datastore.
Minimum requirements for Documents in the /upsert endpoint's UpsertRequest.
"""
return [
Document(
text=turtle_test["text"],
metadata=turtle_test["metadata"],
doc_id=turtle_test["doc_id"],
embedding=dummy_embedding,
),
Document(
text="something about whales",
metadata={"animal": "whale"},
doc_id="5678",
embedding=dummy_embedding,
),
]
@pytest.fixture()
def test_nodes(documents) -> TextNode:
parser = SentenceSplitter()
return parser.get_nodes_from_documents(documents)
@pytest.fixture()
def redis_client() -> Redis:
return Redis.from_url("redis://localhost:6379/0")
|
import time
import pytest
from typing import List
from llama_index.core.schema import Document, TextNode
from llama_index.core.node_parser import SentenceSplitter
from redis import Redis
import docker
docker_client = docker.from_env()
docker_client.ping()
container = docker_client.containers.run(
"redis/redis-stack:latest",
detach=True,
name="redis",
ports={"6379/tcp": 6379, "8001/tcp": 8001},
)
# wait for redis to be ready
time.sleep(2)
@pytest.fixture(scope="session", autouse=True)
def docker_setup():
yield container
container.stop()
container.remove()
@pytest.fixture()
def dummy_embedding() -> List:
return [0] * 1536
@pytest.fixture()
def turtle_test() -> dict:
return {
"text": "something about turtles",
"metadata": {"animal": "turtle"},
"question": "turtle stuff",
"doc_id": "1234",
}
@pytest.fixture()
def documents(turtle_test, dummy_embedding) -> List[Document]:
"""List of documents represents data to be embedded in the datastore.
Minimum requirements for Documents in the /upsert endpoint's UpsertRequest.
"""
return [
Document(
text=turtle_test["text"],
metadata=turtle_test["metadata"],
doc_id=turtle_test["doc_id"],
embedding=dummy_embedding,
),
Document(
text="something about whales",
metadata={"animal": "whale"},
doc_id="5678",
embedding=dummy_embedding,
),
]
@pytest.fixture()
def test_nodes(documents) -> TextNode:
parser = SentenceSplitter()
return parser.get_nodes_from_documents(documents)
@pytest.fixture()
def redis_client() -> Redis:
return Redis.from_url("redis://localhost:6379/0")
|
"""Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from typing import Any, Dict, List
import numpy as np
from langchain_core.example_selectors import BaseExampleSelector
from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel, model_validator
def ngram_overlap_score(source: List[str], example: List[str]) -> float:
"""Compute ngram overlap score of source and example as sentence_bleu score
from NLTK package.
Use sentence_bleu with method1 smoothing function and auto reweighting.
Return float value between 0.0 and 1.0 inclusive.
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from nltk.translate.bleu_score import (
SmoothingFunction,
sentence_bleu,
)
hypotheses = source[0].split()
references = [s.split() for s in example]
return float(
sentence_bleu(
references,
hypotheses,
smoothing_function=SmoothingFunction().method1,
auto_reweigh=True,
)
)
class NGramOverlapExampleSelector(BaseExampleSelector, BaseModel):
"""Select and order examples based on ngram overlap score (sentence_bleu score
from NLTK package).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
examples: List[dict]
"""A list of the examples that the prompt template expects."""
example_prompt: PromptTemplate
"""Prompt template used to format the examples."""
threshold: float = -1.0
"""Threshold at which algorithm stops. Set to -1.0 by default.
For negative threshold:
select_examples sorts examples by ngram_overlap_score, but excludes none.
For threshold greater than 1.0:
select_examples excludes all examples, and returns an empty list.
For threshold equal to 0.0:
select_examples sorts examples by ngram_overlap_score,
and excludes examples with no ngram overlap with input.
"""
@model_validator(mode="before")
@classmethod
def check_dependencies(cls, values: Dict) -> Any:
"""Check that valid dependencies exist."""
try:
from nltk.translate.bleu_score import ( # noqa: F401
SmoothingFunction,
sentence_bleu,
)
except ImportError as e:
raise ImportError(
"Not all the correct dependencies for this ExampleSelect exist."
"Please install nltk with `pip install nltk`."
) from e
return values
def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to list."""
self.examples.append(example)
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Return list of examples sorted by ngram_overlap_score with input.
Descending order.
Excludes any examples with ngram_overlap_score less than or equal to threshold.
"""
inputs = list(input_variables.values())
examples = []
k = len(self.examples)
score = [0.0] * k
first_prompt_template_key = self.example_prompt.input_variables[0]
for i in range(k):
score[i] = ngram_overlap_score(
inputs, [self.examples[i][first_prompt_template_key]]
)
while True:
arg_max = np.argmax(score)
if (score[arg_max] < self.threshold) or abs(
score[arg_max] - self.threshold
) < 1e-9:
break
examples.append(self.examples[arg_max])
score[arg_max] = self.threshold - 1.0
return examples
|
"""Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from typing import Any, Dict, List
import numpy as np
from langchain_core.example_selectors import BaseExampleSelector
from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel, model_validator
def ngram_overlap_score(source: List[str], example: List[str]) -> float:
"""Compute ngram overlap score of source and example as sentence_bleu score
from NLTK package.
Use sentence_bleu with method1 smoothing function and auto reweighting.
Return float value between 0.0 and 1.0 inclusive.
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from nltk.translate.bleu_score import (
SmoothingFunction, # type: ignore
sentence_bleu,
)
hypotheses = source[0].split()
references = [s.split() for s in example]
return float(
sentence_bleu(
references,
hypotheses,
smoothing_function=SmoothingFunction().method1,
auto_reweigh=True,
)
)
class NGramOverlapExampleSelector(BaseExampleSelector, BaseModel):
"""Select and order examples based on ngram overlap score (sentence_bleu score
from NLTK package).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
examples: List[dict]
"""A list of the examples that the prompt template expects."""
example_prompt: PromptTemplate
"""Prompt template used to format the examples."""
threshold: float = -1.0
"""Threshold at which algorithm stops. Set to -1.0 by default.
For negative threshold:
select_examples sorts examples by ngram_overlap_score, but excludes none.
For threshold greater than 1.0:
select_examples excludes all examples, and returns an empty list.
For threshold equal to 0.0:
select_examples sorts examples by ngram_overlap_score,
and excludes examples with no ngram overlap with input.
"""
@model_validator(mode="before")
@classmethod
def check_dependencies(cls, values: Dict) -> Any:
"""Check that valid dependencies exist."""
try:
from nltk.translate.bleu_score import ( # noqa: F401
SmoothingFunction,
sentence_bleu,
)
except ImportError as e:
raise ImportError(
"Not all the correct dependencies for this ExampleSelect exist."
"Please install nltk with `pip install nltk`."
) from e
return values
def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to list."""
self.examples.append(example)
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Return list of examples sorted by ngram_overlap_score with input.
Descending order.
Excludes any examples with ngram_overlap_score less than or equal to threshold.
"""
inputs = list(input_variables.values())
examples = []
k = len(self.examples)
score = [0.0] * k
first_prompt_template_key = self.example_prompt.input_variables[0]
for i in range(k):
score[i] = ngram_overlap_score(
inputs, [self.examples[i][first_prompt_template_key]]
)
while True:
arg_max = np.argmax(score)
if (score[arg_max] < self.threshold) or abs(
score[arg_max] - self.threshold
) < 1e-9:
break
examples.append(self.examples[arg_max])
score[arg_max] = self.threshold - 1.0
return examples
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
import warnings
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Default: True
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print_log(
f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ",
logger='current')
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print_log(
f'Finish registry analysis, got: {scan_data}', logger='current')
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print_log(f'Result has been saved to {json_path}', logger='current')
return scan_data
def init_default_scope(scope: str) -> None:
"""Initialize the given default scope.
Args:
scope (str): The name of the default scope.
"""
never_created = DefaultScope.get_current_instance(
) is None or not DefaultScope.check_instance_created(scope)
if never_created:
DefaultScope.get_instance(scope, scope_name=scope)
return
current_scope = DefaultScope.get_current_instance() # type: ignore
if current_scope.scope_name != scope: # type: ignore
warnings.warn('The current default scope ' # type: ignore
f'"{current_scope.scope_name}" is not "{scope}", '
'`init_default_scope` will force set the current'
f'default scope to "{scope}".')
# avoid name conflict
new_instance_name = f'{scope}-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name=scope)
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Default: True
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print_log(
f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ",
logger='current')
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print_log(
f'Finish registry analysis, got: {scan_data}', logger='current')
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print_log(f'Result has been saved to {json_path}', logger='current')
return scan_data
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.3.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.3.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyTensor, Embedding, PointCloud3DUrl
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain an PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an Embedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray import PointCloud3D
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[Embedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import Document, PointCloud3D, Text
# compose it
class MultiModalDoc(Document):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[Embedding]
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import Embedding, PointCloud3DUrl, Tensor
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain an PointCloud3DUrl (`PointCloud3D.url`), a
Tensor (`PointCloud3D.tensor`), and an Embedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray import PointCloud3D
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[Embedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import Document, PointCloud3D, Text
# compose it
class MultiModalDoc(Document):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[Tensor]
embedding: Optional[Embedding]
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import (
AutoencoderKLWan,
FlowMatchEulerDiscreteScheduler,
WanPipeline,
WanTransformer3DModel,
)
from diffusers.utils.testing_utils import floats_tensor, require_peft_backend, skip_mps
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
@skip_mps
class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = WanPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
scheduler_kwargs = {}
transformer_kwargs = {
"patch_size": (1, 2, 2),
"num_attention_heads": 2,
"attention_head_dim": 12,
"in_channels": 16,
"out_channels": 16,
"text_dim": 32,
"freq_dim": 256,
"ffn_dim": 32,
"num_layers": 2,
"cross_attn_norm": True,
"qk_norm": "rms_norm_across_heads",
"rope_max_seq_len": 32,
}
transformer_cls = WanTransformer3DModel
vae_kwargs = {
"base_dim": 3,
"z_dim": 16,
"dim_mult": [1, 1, 1, 1],
"num_res_blocks": 1,
"temperal_downsample": [False, True, True],
}
vae_cls = AutoencoderKLWan
has_two_text_encoders = True
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
text_encoder_target_modules = ["q", "k", "v", "o"]
@property
def output_shape(self):
return (1, 9, 32, 32, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
num_frames = 9
num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1
sizes = (4, 4)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "",
"num_frames": num_frames,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"height": 32,
"width": 32,
"max_sequence_length": sequence_length,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
@unittest.skip("Not supported in Wan.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in Wan.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Not supported in Wan.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import (
AutoencoderKLWan,
FlowMatchEulerDiscreteScheduler,
WanPipeline,
WanTransformer3DModel,
)
from diffusers.utils.testing_utils import (
floats_tensor,
require_peft_backend,
skip_mps,
)
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
@skip_mps
class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = WanPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
scheduler_kwargs = {}
transformer_kwargs = {
"patch_size": (1, 2, 2),
"num_attention_heads": 2,
"attention_head_dim": 12,
"in_channels": 16,
"out_channels": 16,
"text_dim": 32,
"freq_dim": 256,
"ffn_dim": 32,
"num_layers": 2,
"cross_attn_norm": True,
"qk_norm": "rms_norm_across_heads",
"rope_max_seq_len": 32,
}
transformer_cls = WanTransformer3DModel
vae_kwargs = {
"base_dim": 3,
"z_dim": 16,
"dim_mult": [1, 1, 1, 1],
"num_res_blocks": 1,
"temperal_downsample": [False, True, True],
}
vae_cls = AutoencoderKLWan
has_two_text_encoders = True
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
text_encoder_target_modules = ["q", "k", "v", "o"]
@property
def output_shape(self):
return (1, 9, 32, 32, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
num_frames = 9
num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1
sizes = (4, 4)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "",
"num_frames": num_frames,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"height": 32,
"width": 32,
"max_sequence_length": sequence_length,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
@unittest.skip("Not supported in Wan.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in Wan.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Not supported in Wan.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
"""
This script contains an example how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is installing the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_seismic
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Seismic
results, search_time, corpus_index = semantic_search_seismic(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is installing the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_seismic
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_seismic(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import SchedulerMixin, UNet2DModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
`return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import SchedulerMixin, UNet2DModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
`return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
|
import inspect
import logging
from typing import Any, Callable, Optional
from fastapi import HTTPException, Request, Security
from fastapi.security import APIKeyHeader, HTTPBearer
from starlette.status import HTTP_401_UNAUTHORIZED
from .config import settings
from .jwt_utils import parse_jwt_token
security = HTTPBearer()
logger = logging.getLogger(__name__)
async def auth_middleware(request: Request):
if not settings.ENABLE_AUTH:
# If authentication is disabled, allow the request to proceed
logger.warning("Auth disabled")
return {}
security = HTTPBearer()
credentials = await security(request)
if not credentials:
raise HTTPException(status_code=401, detail="Authorization header is missing")
try:
payload = parse_jwt_token(credentials.credentials)
request.state.user = payload
logger.debug("Token decoded successfully")
except ValueError as e:
raise HTTPException(status_code=401, detail=str(e))
return payload
class APIKeyValidator:
"""
Configurable API key validator that supports custom validation functions
for FastAPI applications.
This class provides a flexible way to implement API key authentication with optional
custom validation logic. It can be used for simple token matching
or more complex validation scenarios like database lookups.
Examples:
Simple token validation:
```python
validator = APIKeyValidator(
header_name="X-API-Key",
expected_token="your-secret-token"
)
@app.get("/protected", dependencies=[Depends(validator.get_dependency())])
def protected_endpoint():
return {"message": "Access granted"}
```
Custom validation with database lookup:
```python
async def validate_with_db(api_key: str):
api_key_obj = await db.get_api_key(api_key)
return api_key_obj if api_key_obj and api_key_obj.is_active else None
validator = APIKeyValidator(
header_name="X-API-Key",
validate_fn=validate_with_db
)
```
Args:
header_name (str): The name of the header containing the API key
expected_token (Optional[str]): The expected API key value for simple token matching
validate_fn (Optional[Callable]): Custom validation function that takes an API key
string and returns a boolean or object. Can be async.
error_status (int): HTTP status code to use for validation errors
error_message (str): Error message to return when validation fails
"""
def __init__(
self,
header_name: str,
expected_token: Optional[str] = None,
validate_fn: Optional[Callable[[str], bool]] = None,
error_status: int = HTTP_401_UNAUTHORIZED,
error_message: str = "Invalid API key",
):
# Create the APIKeyHeader as a class property
self.security_scheme = APIKeyHeader(name=header_name)
self.expected_token = expected_token
self.custom_validate_fn = validate_fn
self.error_status = error_status
self.error_message = error_message
async def default_validator(self, api_key: str) -> bool:
return api_key == self.expected_token
async def __call__(
self, request: Request, api_key: str = Security(APIKeyHeader)
) -> Any:
if api_key is None:
raise HTTPException(status_code=self.error_status, detail="Missing API key")
# Use custom validation if provided, otherwise use default equality check
validator = self.custom_validate_fn or self.default_validator
result = (
await validator(api_key)
if inspect.iscoroutinefunction(validator)
else validator(api_key)
)
if not result:
raise HTTPException(
status_code=self.error_status, detail=self.error_message
)
# Store validation result in request state if it's not just a boolean
if result is not True:
request.state.api_key = result
return result
def get_dependency(self):
"""
Returns a callable dependency that FastAPI will recognize as a security scheme
"""
async def validate_api_key(
request: Request, api_key: str = Security(self.security_scheme)
) -> Any:
return await self(request, api_key)
# This helps FastAPI recognize it as a security dependency
validate_api_key.__name__ = f"validate_{self.security_scheme.model.name}"
return validate_api_key
|
import inspect
import logging
from typing import Any, Callable, Optional
from fastapi import HTTPException, Request, Security
from fastapi.security import APIKeyHeader, HTTPBearer
from starlette.status import HTTP_401_UNAUTHORIZED
from .config import settings
from .jwt_utils import parse_jwt_token
security = HTTPBearer()
logger = logging.getLogger(__name__)
async def auth_middleware(request: Request):
if not settings.ENABLE_AUTH:
# If authentication is disabled, allow the request to proceed
logger.warn("Auth disabled")
return {}
security = HTTPBearer()
credentials = await security(request)
if not credentials:
raise HTTPException(status_code=401, detail="Authorization header is missing")
try:
payload = parse_jwt_token(credentials.credentials)
request.state.user = payload
logger.debug("Token decoded successfully")
except ValueError as e:
raise HTTPException(status_code=401, detail=str(e))
return payload
class APIKeyValidator:
"""
Configurable API key validator that supports custom validation functions
for FastAPI applications.
This class provides a flexible way to implement API key authentication with optional
custom validation logic. It can be used for simple token matching
or more complex validation scenarios like database lookups.
Examples:
Simple token validation:
```python
validator = APIKeyValidator(
header_name="X-API-Key",
expected_token="your-secret-token"
)
@app.get("/protected", dependencies=[Depends(validator.get_dependency())])
def protected_endpoint():
return {"message": "Access granted"}
```
Custom validation with database lookup:
```python
async def validate_with_db(api_key: str):
api_key_obj = await db.get_api_key(api_key)
return api_key_obj if api_key_obj and api_key_obj.is_active else None
validator = APIKeyValidator(
header_name="X-API-Key",
validate_fn=validate_with_db
)
```
Args:
header_name (str): The name of the header containing the API key
expected_token (Optional[str]): The expected API key value for simple token matching
validate_fn (Optional[Callable]): Custom validation function that takes an API key
string and returns a boolean or object. Can be async.
error_status (int): HTTP status code to use for validation errors
error_message (str): Error message to return when validation fails
"""
def __init__(
self,
header_name: str,
expected_token: Optional[str] = None,
validate_fn: Optional[Callable[[str], bool]] = None,
error_status: int = HTTP_401_UNAUTHORIZED,
error_message: str = "Invalid API key",
):
# Create the APIKeyHeader as a class property
self.security_scheme = APIKeyHeader(name=header_name)
self.expected_token = expected_token
self.custom_validate_fn = validate_fn
self.error_status = error_status
self.error_message = error_message
async def default_validator(self, api_key: str) -> bool:
return api_key == self.expected_token
async def __call__(
self, request: Request, api_key: str = Security(APIKeyHeader)
) -> Any:
if api_key is None:
raise HTTPException(status_code=self.error_status, detail="Missing API key")
# Use custom validation if provided, otherwise use default equality check
validator = self.custom_validate_fn or self.default_validator
result = (
await validator(api_key)
if inspect.iscoroutinefunction(validator)
else validator(api_key)
)
if not result:
raise HTTPException(
status_code=self.error_status, detail=self.error_message
)
# Store validation result in request state if it's not just a boolean
if result is not True:
request.state.api_key = result
return result
def get_dependency(self):
"""
Returns a callable dependency that FastAPI will recognize as a security scheme
"""
async def validate_api_key(
request: Request, api_key: str = Security(self.security_scheme)
) -> Any:
return await self(request, api_key)
# This helps FastAPI recognize it as a security dependency
validate_api_key.__name__ = f"validate_{self.security_scheme.model.name}"
return validate_api_key
|
from typing import TYPE_CHECKING
from docarray.math.ndarray import get_array_type
if TYPE_CHECKING:
from docarray.typing import ArrayType
import numpy as np
def pdist(
x_mat: 'ArrayType',
metric: str,
) -> 'np.ndarray':
"""Computes Pairwise distances between observations in n-dimensional space.
:param x_mat: Union['np.ndarray','scipy.sparse.csr_matrix', 'scipy.sparse.coo_matrix'] of ndim 2
:param metric: string describing the metric type
:return: np.ndarray of ndim 2
"""
return cdist(x_mat, x_mat, metric)
def cdist(
x_mat: 'ArrayType', y_mat: 'ArrayType', metric: str, device: str = 'cpu'
) -> 'np.ndarray':
"""Computes the pairwise distance between each row of X and each row on Y according to `metric`.
- Let `n_x = x_mat.shape[0]`
- Let `n_y = y_mat.shape[0]`
- Returns a matrix `dist` of shape `(n_x, n_y)` with `dist[i,j] = metric(x_mat[i], y_mat[j])`.
:param x_mat: numpy or scipy array of ndim 2
:param y_mat: numpy or scipy array of ndim 2
:param metric: string describing the metric type
:param device: the computational device, can be either `cpu` or `cuda`.
:return: np.ndarray of ndim 2
"""
x_type = get_array_type(x_mat)
y_type = get_array_type(y_mat)
if x_type != y_type:
raise ValueError(
f'The type of your left-hand side is {x_type}, whereas your right-hand side is {y_type}. '
f'`.cdist()` requires left must be the same type as right.'
)
framework, is_sparse = get_array_type(x_mat)
dists = None
if metric == 'cosine':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_cosine
dists = sparse_cosine(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import cosine
dists = cosine(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import cosine
dists = cosine(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import cosine
dists = cosine(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import cosine
dists = cosine(x_mat, y_mat, device=device)
elif metric == 'sqeuclidean':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_sqeuclidean
dists = sparse_sqeuclidean(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import sqeuclidean
dists = sqeuclidean(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif metric == 'euclidean':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_euclidean
dists = sparse_euclidean(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import euclidean
dists = euclidean(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import euclidean
dists = euclidean(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import euclidean
dists = euclidean(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import euclidean
dists = euclidean(x_mat, y_mat, device=device)
else:
raise NotImplementedError(f'Input metric={metric} is not supported')
if dists is None:
raise NotImplementedError(
f'{framework} sparse={is_sparse} array is not supported'
)
return dists
|
from typing import TYPE_CHECKING
from ..ndarray import get_array_type
if TYPE_CHECKING:
from ...typing import ArrayType
import numpy as np
def pdist(
x_mat: 'ArrayType',
metric: str,
) -> 'np.ndarray':
"""Computes Pairwise distances between observations in n-dimensional space.
:param x_mat: Union['np.ndarray','scipy.sparse.csr_matrix', 'scipy.sparse.coo_matrix'] of ndim 2
:param metric: string describing the metric type
:return: np.ndarray of ndim 2
"""
return cdist(x_mat, x_mat, metric)
def cdist(
x_mat: 'ArrayType', y_mat: 'ArrayType', metric: str, device: str = 'cpu'
) -> 'np.ndarray':
"""Computes the pairwise distance between each row of X and each row on Y according to `metric`.
- Let `n_x = x_mat.shape[0]`
- Let `n_y = y_mat.shape[0]`
- Returns a matrix `dist` of shape `(n_x, n_y)` with `dist[i,j] = metric(x_mat[i], y_mat[j])`.
:param x_mat: numpy or scipy array of ndim 2
:param y_mat: numpy or scipy array of ndim 2
:param metric: string describing the metric type
:param device: the computational device, can be either `cpu` or `cuda`.
:return: np.ndarray of ndim 2
"""
x_type = get_array_type(x_mat)
y_type = get_array_type(y_mat)
if x_type != y_type:
raise ValueError(
f'The type of your left-hand side is {x_type}, whereas your right-hand side is {y_type}. '
f'`.cdist()` requires left must be the same type as right.'
)
framework, is_sparse = get_array_type(x_mat)
dists = None
if metric == 'cosine':
if framework == 'scipy' and is_sparse:
from .numpy import sparse_cosine
dists = sparse_cosine(x_mat, y_mat)
elif framework == 'numpy':
from .numpy import cosine
dists = cosine(x_mat, y_mat)
elif framework == 'tensorflow':
from .tensorflow import cosine
dists = cosine(x_mat, y_mat, device=device)
elif framework == 'torch':
from .torch import cosine
dists = cosine(x_mat, y_mat, device=device)
elif framework == 'paddle':
from .paddle import cosine
dists = cosine(x_mat, y_mat, device=device)
elif metric == 'sqeuclidean':
if framework == 'scipy' and is_sparse:
from .numpy import sparse_sqeuclidean
dists = sparse_sqeuclidean(x_mat, y_mat)
elif framework == 'numpy':
from .numpy import sqeuclidean
dists = sqeuclidean(x_mat, y_mat)
elif framework == 'tensorflow':
from .tensorflow import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif framework == 'torch':
from .torch import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif framework == 'paddle':
from .paddle import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif metric == 'euclidean':
if framework == 'scipy' and is_sparse:
from .numpy import sparse_euclidean
dists = sparse_euclidean(x_mat, y_mat)
elif framework == 'numpy':
from .numpy import euclidean
dists = euclidean(x_mat, y_mat)
elif framework == 'tensorflow':
from .tensorflow import euclidean
dists = euclidean(x_mat, y_mat, device=device)
elif framework == 'torch':
from .torch import euclidean
dists = euclidean(x_mat, y_mat, device=device)
elif framework == 'paddle':
from .paddle import euclidean
dists = euclidean(x_mat, y_mat, device=device)
else:
raise NotImplementedError(f'Input metric={metric} is not supported')
if dists is None:
raise NotImplementedError(
f'{framework} sparse={is_sparse} array is not supported'
)
return dists
|
import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray
def test_set_tensor():
class MyDocument(BaseDoc):
tensor: NdArray
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
|
import numpy as np
from docarray import BaseDocument
from docarray.typing import NdArray
def test_set_tensor():
class MyDocument(BaseDocument):
tensor: NdArray
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[32, 44])
runner = dict(type='EpochBasedRunner', max_epochs=48)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleTrendsQueryRun": "langchain_community.tools.google_trends.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleTrendsQueryRun",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleTrendsQueryRun": "langchain_community.tools.google_trends.tool"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleTrendsQueryRun",
]
|
from __future__ import annotations
from typing import Any, Union
from langchain_core.retrievers import (
BaseRetriever,
RetrieverOutput,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
def create_retrieval_chain(
retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]],
combine_docs_chain: Runnable[dict[str, Any], str],
) -> Runnable:
"""Create retrieval chain that retrieves documents and then passes them on.
Args:
retriever: Retriever-like object that returns list of documents. Should
either be a subclass of BaseRetriever or a Runnable that returns
a list of documents. If a subclass of BaseRetriever, then it
is expected that an `input` key be passed in - this is what
is will be used to pass into the retriever. If this is NOT a
subclass of BaseRetriever, then all the inputs will be passed
into this runnable, meaning that runnable should take a dictionary
as input.
combine_docs_chain: Runnable that takes inputs and produces a string output.
The inputs to this will be any original inputs to this chain, a new
context key with the retrieved documents, and chat_history (if not present
in the inputs) with a value of `[]` (to easily enable conversational
retrieval.
Returns:
An LCEL Runnable. The Runnable return is a dictionary containing at the very
least a `context` and `answer` key.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
llm = ChatOpenAI()
retriever = ...
combine_docs_chain = create_stuff_documents_chain(
llm, retrieval_qa_chat_prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
retrieval_chain.invoke({"input": "..."})
"""
if not isinstance(retriever, BaseRetriever):
retrieval_docs: Runnable[dict, RetrieverOutput] = retriever
else:
retrieval_docs = (lambda x: x["input"]) | retriever
return (
RunnablePassthrough.assign(
context=retrieval_docs.with_config(run_name="retrieve_documents"),
).assign(answer=combine_docs_chain)
).with_config(run_name="retrieval_chain")
|
from __future__ import annotations
from typing import Any, Union
from langchain_core.retrievers import (
BaseRetriever,
RetrieverOutput,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
def create_retrieval_chain(
retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]],
combine_docs_chain: Runnable[dict[str, Any], str],
) -> Runnable:
"""Create retrieval chain that retrieves documents and then passes them on.
Args:
retriever: Retriever-like object that returns list of documents. Should
either be a subclass of BaseRetriever or a Runnable that returns
a list of documents. If a subclass of BaseRetriever, then it
is expected that an `input` key be passed in - this is what
is will be used to pass into the retriever. If this is NOT a
subclass of BaseRetriever, then all the inputs will be passed
into this runnable, meaning that runnable should take a dictionary
as input.
combine_docs_chain: Runnable that takes inputs and produces a string output.
The inputs to this will be any original inputs to this chain, a new
context key with the retrieved documents, and chat_history (if not present
in the inputs) with a value of `[]` (to easily enable conversational
retrieval.
Returns:
An LCEL Runnable. The Runnable return is a dictionary containing at the very
least a `context` and `answer` key.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
llm = ChatOpenAI()
retriever = ...
combine_docs_chain = create_stuff_documents_chain(
llm, retrieval_qa_chat_prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
retrieval_chain.invoke({"input": "..."})
"""
if not isinstance(retriever, BaseRetriever):
retrieval_docs: Runnable[dict, RetrieverOutput] = retriever
else:
retrieval_docs = (lambda x: x["input"]) | retriever
retrieval_chain = (
RunnablePassthrough.assign(
context=retrieval_docs.with_config(run_name="retrieve_documents"),
).assign(answer=combine_docs_chain)
).with_config(run_name="retrieval_chain")
return retrieval_chain
|
_base_ = './htc_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
|
"""Agent components."""
from typing import Any, Callable, Dict, Optional, Set
from llama_index.core.base.query_pipeline.query import (
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.query_pipeline.components.function import (
FnComponent,
get_parameters,
)
# from llama_index.core.query_pipeline.components.input import InputComponent
class BaseStatefulComponent(QueryComponent):
"""Takes in agent inputs and transforms it into desired outputs."""
state: Dict[str, Any] = Field(
default_factory=dict, description="State of the pipeline."
)
def reset_state(self) -> None:
"""Reset state."""
self.state = {}
class StatefulFnComponent(BaseStatefulComponent, FnComponent):
"""
Query component that takes in an arbitrary function.
Stateful version of `FnComponent`. Expects functions to have `state` as the first argument.
"""
def __init__(
self,
fn: Callable,
req_params: Optional[Set[str]] = None,
opt_params: Optional[Set[str]] = None,
state: Optional[Dict[str, Any]] = None,
**kwargs: Any
) -> None:
"""Init params."""
# determine parameters
default_req_params, default_opt_params = get_parameters(fn)
# make sure task and step are part of the list, and remove them from the list
if "state" not in default_req_params:
raise ValueError(
"StatefulFnComponent must have 'state' as required parameters"
)
default_req_params = default_req_params - {"state"}
default_opt_params = default_opt_params - {"state"}
if req_params is None:
req_params = default_req_params
if opt_params is None:
opt_params = default_opt_params
super().__init__(
fn=fn,
req_params=req_params,
opt_params=opt_params,
state=state or {},
**kwargs
)
def _run_component(self, **kwargs: Any) -> Dict:
"""Run component."""
kwargs.update({"state": self.state})
return super()._run_component(**kwargs)
async def _arun_component(self, **kwargs: Any) -> Any:
"""Async run component."""
kwargs.update({"state": self.state})
return await super()._arun_component(**kwargs)
|
"""Agent components."""
from typing import Any, Callable, Dict, Optional, Set
from llama_index.core.base.query_pipeline.query import (
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.query_pipeline.components.function import (
FnComponent,
get_parameters,
)
# from llama_index.core.query_pipeline.components.input import InputComponent
class BaseStatefulComponent(QueryComponent):
"""Takes in agent inputs and transforms it into desired outputs."""
state: Dict[str, Any] = Field(
default_factory=dict, description="State of the pipeline."
)
def reset_state(self) -> None:
"""Reset state."""
self.state = {}
class StatefulFnComponent(BaseStatefulComponent, FnComponent):
"""Query component that takes in an arbitrary function.
Stateful version of `FnComponent`. Expects functions to have `state` as the first argument.
"""
def __init__(
self,
fn: Callable,
req_params: Optional[Set[str]] = None,
opt_params: Optional[Set[str]] = None,
state: Optional[Dict[str, Any]] = None,
**kwargs: Any
) -> None:
"""Init params."""
# determine parameters
default_req_params, default_opt_params = get_parameters(fn)
# make sure task and step are part of the list, and remove them from the list
if "state" not in default_req_params:
raise ValueError(
"StatefulFnComponent must have 'state' as required parameters"
)
default_req_params = default_req_params - {"state"}
default_opt_params = default_opt_params - {"state"}
if req_params is None:
req_params = default_req_params
if opt_params is None:
opt_params = default_opt_params
super().__init__(
fn=fn,
req_params=req_params,
opt_params=opt_params,
state=state or {},
**kwargs
)
def _run_component(self, **kwargs: Any) -> Dict:
"""Run component."""
kwargs.update({"state": self.state})
return super()._run_component(**kwargs)
async def _arun_component(self, **kwargs: Any) -> Any:
"""Async run component."""
kwargs.update({"state": self.state})
return await super()._arun_component(**kwargs)
|
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel, Field
def test_convert_pydantic_to_openai_function() -> None:
class Data(BaseModel):
"""The data to return."""
key: str = Field(..., description="API key")
days: int = Field(default=0, description="Number of days to forecast")
actual = convert_pydantic_to_openai_function(Data)
expected = {
"name": "Data",
"description": "The data to return.",
"parameters": {
"type": "object",
"properties": {
"key": {"description": "API key", "type": "string"},
"days": {
"description": "Number of days to forecast",
"default": 0,
"type": "integer",
},
},
"required": ["key"],
},
}
assert actual == expected
def test_convert_pydantic_to_openai_function_nested() -> None:
class Data(BaseModel):
"""The data to return."""
key: str = Field(..., description="API key")
days: int = Field(default=0, description="Number of days to forecast")
class Model(BaseModel):
"""The model to return."""
data: Data
actual = convert_pydantic_to_openai_function(Model)
expected = {
"name": "Model",
"description": "The model to return.",
"parameters": {
"type": "object",
"properties": {
"data": {
"description": "The data to return.",
"type": "object",
"properties": {
"key": {
"description": "API key",
"type": "string",
},
"days": {
"description": "Number of days to forecast",
"default": 0,
"type": "integer",
},
},
"required": ["key"],
},
},
"required": ["data"],
},
}
assert actual == expected
|
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel, Field
def test_convert_pydantic_to_openai_function() -> None:
class Data(BaseModel):
"""The data to return."""
key: str = Field(..., description="API key")
days: int = Field(default=0, description="Number of days to forecast")
actual = convert_pydantic_to_openai_function(Data)
expected = {
"name": "Data",
"description": "The data to return.",
"parameters": {
"type": "object",
"properties": {
"key": {"description": "API key", "type": "string"},
"days": {
"description": "Number of days to forecast",
"default": 0,
"type": "integer",
},
},
"required": ["key"],
},
}
assert actual == expected
def test_convert_pydantic_to_openai_function_nested() -> None:
class Data(BaseModel):
"""The data to return."""
key: str = Field(..., description="API key")
days: int = Field(default=0, description="Number of days to forecast")
class Model(BaseModel):
"""The model to return."""
data: Data
actual = convert_pydantic_to_openai_function(Model)
expected = {
"name": "Model",
"description": "The model to return.",
"parameters": {
"type": "object",
"properties": {
"data": {
"description": "The data to return.",
"type": "object",
"properties": {
"key": {
"description": "API key",
"type": "string",
},
"days": {
"description": "Number of days to forecast",
"default": 0,
"type": "integer",
},
},
"required": ["key"],
}
},
"required": ["data"],
},
}
assert actual == expected
|
from typing import TYPE_CHECKING, Type, Optional
if TYPE_CHECKING:
from docarray.typing import T
from docarray.proto.docarray_pb2 import DocumentProto
class ProtobufMixin:
@classmethod
def from_protobuf(cls: Type['T'], pb_msg: 'DocumentProto') -> 'T':
from docarray.proto.io import parse_proto
return parse_proto(pb_msg)
def to_protobuf(self, ndarray_type: Optional[str] = None) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:param ndarray_type: can be ``list`` or ``numpy``, if set it will force all ndarray-like object to be ``List`` or ``numpy.ndarray``.
:return: the protobuf message
"""
from docarray.proto.io import flush_proto
return flush_proto(self, ndarray_type)
|
from typing import TYPE_CHECKING, Type, Optional
if TYPE_CHECKING:
from ...typing import T
from ...proto.docarray_pb2 import DocumentProto
class ProtobufMixin:
@classmethod
def from_protobuf(cls: Type['T'], pb_msg: 'DocumentProto') -> 'T':
from ...proto.io import parse_proto
return parse_proto(pb_msg)
def to_protobuf(self, ndarray_type: Optional[str] = None) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:param ndarray_type: can be ``list`` or ``numpy``, if set it will force all ndarray-like object to be ``List`` or ``numpy.ndarray``.
:return: the protobuf message
"""
from ...proto.io import flush_proto
return flush_proto(self, ndarray_type)
|
from typing import Sequence, cast
import prisma.enums
import prisma.types
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"Nodes": {"include": AGENT_NODE_INCLUDE}
}
EXECUTION_RESULT_ORDER: list[prisma.types.AgentNodeExecutionOrderByInput] = [
{"queuedTime": "desc"},
# Fallback: Incomplete execs has no queuedTime.
{"addedTime": "desc"},
]
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": {"order_by": {"time": "asc"}},
"Output": {"order_by": {"time": "asc"}},
"Node": True,
"GraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
"include": EXECUTION_RESULT_INCLUDE,
"order_by": EXECUTION_RESULT_ORDER,
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
def graph_execution_include(
include_block_ids: Sequence[str],
) -> prisma.types.AgentGraphExecutionInclude:
return {
"NodeExecutions": {
**cast(
prisma.types.FindManyAgentNodeExecutionArgsFromAgentGraphExecution,
GRAPH_EXECUTION_INCLUDE_WITH_NODES["NodeExecutions"], # type: ignore
),
"where": {
"Node": {
"is": {"AgentBlock": {"is": {"id": {"in": include_block_ids}}}}
},
"NOT": [
{"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE}
],
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE}
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"AgentGraph": {
"include": {
**AGENT_GRAPH_INCLUDE,
"Executions": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
|
from typing import Sequence, cast
import prisma.enums
import prisma.types
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"Nodes": {"include": AGENT_NODE_INCLUDE}
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
"include": {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
# Fallback: Incomplete execs has no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
def graph_execution_include(
include_block_ids: Sequence[str],
) -> prisma.types.AgentGraphExecutionInclude:
return {
"NodeExecutions": {
**cast(
prisma.types.FindManyAgentNodeExecutionArgsFromAgentGraphExecution,
GRAPH_EXECUTION_INCLUDE_WITH_NODES["NodeExecutions"], # type: ignore
),
"where": {
"Node": {
"is": {"AgentBlock": {"is": {"id": {"in": include_block_ids}}}}
},
"NOT": [
{"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE}
],
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE}
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"AgentGraph": {
"include": {
**AGENT_GRAPH_INCLUDE,
"Executions": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
|
import importlib
class LazyModule:
def __init__(self, name, pip_name=None, import_error_msg=None):
self.name = name
self.pip_name = pip_name or name
self.import_error_msg = import_error_msg or (
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(self.import_error_msg)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
def __repr__(self):
return f"LazyModule({self.name})"
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torchvision = LazyModule("torchvision")
torch_xla = LazyModule(
"torch_xla",
import_error_msg=(
"This requires the torch_xla module. You can install it via "
"`pip install torch-xla`. Additionally, you may need to update "
"LD_LIBRARY_PATH if necessary. Torch XLA builds a shared library, "
"_XLAC.so, which needs to link to the version of Python it was built "
"with. Use the following command to update LD_LIBRARY_PATH: "
"`export LD_LIBRARY_PATH=<path to Python>/lib:$LD_LIBRARY_PATH`"
),
)
optree = LazyModule("optree")
dmtree = LazyModule("tree")
|
import importlib
class LazyModule:
def __init__(self, name, pip_name=None):
self.name = name
pip_name = pip_name or name
self.pip_name = pip_name
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
def __repr__(self):
return f"LazyModule({self.name})"
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torchvision = LazyModule("torchvision")
optree = LazyModule("optree")
dmtree = LazyModule("tree")
|
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
def test_empty_dtype():
tensor = TorchCompBackend.empty((10, 3), dtype=torch.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == torch.int32
def test_empty_device():
tensor = TorchCompBackend.empty((10, 3), device='meta')
assert tensor.shape == (10, 3)
assert tensor.device == torch.device('meta')
|
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import device_scope
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import random_seed_dtype
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
|
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import random_seed_dtype
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']:
cfg.data.train.pipeline = [
p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale'
]
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_bboxes = item['gt_bboxes']
gt_labels = item['gt_labels']
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
gt_seg = item.get('gt_semantic_seg', None)
if gt_seg is not None:
pad_value = 255 # the padding value of gt_seg
sem_labels = np.unique(gt_seg)
all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
all_labels, counts = np.unique(all_labels, return_counts=True)
stuff_labels = all_labels[np.logical_and(counts < 2,
all_labels != pad_value)]
stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),
axis=0)
# If you need to show the bounding boxes,
# please comment the following line
gt_bboxes = None
imshow_det_bboxes(
item['img'],
gt_bboxes,
gt_labels,
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=dataset.PALETTE,
text_color=(200, 200, 200),
mask_color=dataset.PALETTE)
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
from mmdet.utils import update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']:
cfg.data.train.pipeline = [
p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale'
]
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_bboxes = item['gt_bboxes']
gt_labels = item['gt_labels']
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
gt_seg = item.get('gt_semantic_seg', None)
if gt_seg is not None:
pad_value = 255 # the padding value of gt_seg
sem_labels = np.unique(gt_seg)
all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
all_labels, counts = np.unique(all_labels, return_counts=True)
stuff_labels = all_labels[np.logical_and(counts < 2,
all_labels != pad_value)]
stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),
axis=0)
# If you need to show the bounding boxes,
# please comment the following line
gt_bboxes = None
imshow_det_bboxes(
item['img'],
gt_bboxes,
gt_labels,
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=dataset.PALETTE,
text_color=(200, 200, 200),
mask_color=dataset.PALETTE)
progress_bar.update()
if __name__ == '__main__':
main()
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder) -> None:
super().__init__()
self.model = model
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives)
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor:
anchors = embeddings[0] # (batch_size, embedding_dim)
candidates = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim)
if embeddings_type == "query":
return torch.sum(torch.mean(anchors, dim=0) ** 2)
else:
return torch.sum(torch.mean(candidates, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_simple_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Union[Type, str], datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.BoundingBoxes.wrap_like(
inpt,
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import has_any, is_simple_tensor, query_bounding_boxes, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Union[Type, str], datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = query_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.BoundingBoxes.wrap_like(
inpt,
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
- This loss is never used standalone, but instead used within the :class:`CSRLoss` class. See that loss for more details.
"""
super().__init__()
self.model = model
self.beta = beta
def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
total_loss: The total reconstruction loss value
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
This loss is typically used within the :class:`CSRLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.beta = beta
def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSRReconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
Dictionary containing the total loss and individual loss components
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal
@_register_explicit_noop(
PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask, warn_passthrough=True
)
def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor:
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, datapoints.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal
@_register_explicit_noop(
PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask, warn_passthrough=True
)
def uniform_temporal_subsample(inpt: datapoints._VideoTypeJIT, num_samples: int) -> datapoints._VideoTypeJIT:
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, datapoints.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = Image.open(filename).convert("RGB")
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
# Note: Images might be removed by users at anytime.
pass
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = Image.open(filename).convert("RGB")
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
print("Files already downloaded and verified")
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
# Note: Images might be removed by users at anytime.
pass
|
from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class CurrentWebPageToolInput(BaseModel):
"""Explicit no-args input for CurrentWebPageTool."""
class CurrentWebPageTool(BaseBrowserTool):
"""Tool for getting the URL of the current webpage."""
name: str = "current_webpage"
description: str = "Returns the URL of the current page"
args_schema: Type[BaseModel] = CurrentWebPageToolInput
def _run(
self,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
return str(page.url)
async def _arun(
self,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
return str(page.url)
|
from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class CurrentWebPageToolInput(BaseModel):
"""Explicit no-args input for CurrentWebPageTool."""
class CurrentWebPageTool(BaseBrowserTool): # type: ignore[override, override]
"""Tool for getting the URL of the current webpage."""
name: str = "current_webpage"
description: str = "Returns the URL of the current page"
args_schema: Type[BaseModel] = CurrentWebPageToolInput
def _run(
self,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
return str(page.url)
async def _arun(
self,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
return str(page.url)
|
"""Embeddings."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.embeddings.embeddings import Embeddings
from langchain_core.embeddings.fake import (
DeterministicFakeEmbedding,
FakeEmbeddings,
)
__all__ = ["DeterministicFakeEmbedding", "Embeddings", "FakeEmbeddings"]
_dynamic_imports = {
"Embeddings": "embeddings",
"DeterministicFakeEmbedding": "fake",
"FakeEmbeddings": "fake",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""Embeddings."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.embeddings.embeddings import Embeddings
from langchain_core.embeddings.fake import (
DeterministicFakeEmbedding,
FakeEmbeddings,
)
__all__ = ["DeterministicFakeEmbedding", "Embeddings", "FakeEmbeddings"]
_dynamic_imports = {
"Embeddings": "embeddings",
"DeterministicFakeEmbedding": "fake",
"FakeEmbeddings": "fake",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Imperial College London (Pingchuan Ma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import warnings
import numpy as np
from ibug.face_detection import RetinaFacePredictor
warnings.filterwarnings("ignore")
class LandmarksDetector:
def __init__(self, device="cuda:0", model_name="resnet50"):
self.face_detector = RetinaFacePredictor(
device=device, threshold=0.8, model=RetinaFacePredictor.get_model(model_name)
)
def __call__(self, video_frames):
landmarks = []
for frame in video_frames:
detected_faces = self.face_detector(frame, rgb=False)
if len(detected_faces) >= 1:
landmarks.append(np.reshape(detected_faces[0][:4], (2, 2)))
else:
landmarks.append(None)
return landmarks
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Imperial College London (Pingchuan Ma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import warnings
import numpy as np
import torchvision
from ibug.face_detection import RetinaFacePredictor
warnings.filterwarnings("ignore")
class LandmarksDetector:
def __init__(self, device="cuda:0", model_name="resnet50"):
self.face_detector = RetinaFacePredictor(
device=device, threshold=0.8, model=RetinaFacePredictor.get_model(model_name)
)
def __call__(self, filename):
video_frames = torchvision.io.read_video(filename, pts_unit="sec")[0].numpy()
landmarks = []
for frame in video_frames:
detected_faces = self.face_detector(frame, rgb=False)
if len(detected_faces) >= 1:
landmarks.append(np.reshape(detected_faces[0][:4], (2, 2)))
else:
landmarks.append(None)
return landmarks
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
sparse_plus,
silu,
gelu,
glu,
tanh,
tanh_shrink,
threshold,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
sparsemax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = serialization_lib.deserialize_keras_object(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
sparse_plus,
silu,
gelu,
glu,
tanh,
tanh_shrink,
threshold,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
sparsemax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('stack', [False, True])
@pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)])
def test_batch(shuffle, stack, batch_size, n_batches):
class MyDoc(BaseDoc):
id: int
tensor: NdArray
t_shape = (32, 32)
da = DocList[MyDoc](
[
MyDoc(
id=i,
tensor=np.zeros(t_shape),
)
for i in range(100)
]
)
if stack:
da = da.to_doc_vec()
batches = list(da._batch(batch_size=batch_size, shuffle=shuffle))
assert len(batches) == n_batches
for i, batch in enumerate(batches):
if i < n_batches - 1:
assert len(batch) == batch_size
if stack:
assert batch.tensor.shape == (batch_size, *t_shape)
else:
assert len(batch) <= batch_size
non_shuffled_ids = [
i for i in range(i * batch_size, min((i + 1) * batch_size, len(da)))
]
if not shuffle:
assert batch.id == non_shuffled_ids
else:
assert not (batch.id == non_shuffled_ids)
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('stack', [False, True])
@pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)])
def test_batch(shuffle, stack, batch_size, n_batches):
class MyDoc(BaseDoc):
id: int
tensor: NdArray
t_shape = (32, 32)
da = DocList[MyDoc](
[
MyDoc(
id=i,
tensor=np.zeros(t_shape),
)
for i in range(100)
]
)
if stack:
da = da.stack()
batches = list(da._batch(batch_size=batch_size, shuffle=shuffle))
assert len(batches) == n_batches
for i, batch in enumerate(batches):
if i < n_batches - 1:
assert len(batch) == batch_size
if stack:
assert batch.tensor.shape == (batch_size, *t_shape)
else:
assert len(batch) <= batch_size
non_shuffled_ids = [
i for i in range(i * batch_size, min((i + 1) * batch_size, len(da)))
]
if not shuffle:
assert batch.id == non_shuffled_ids
else:
assert not (batch.id == non_shuffled_ids)
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model, create_model_from_typeddict
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
if TYPE_CHECKING:
from pydantic.typing import AnyClassMethod
T_doc = TypeVar('T_doc', bound=BaseDoc)
def create_doc(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Type['T_doc'] = BaseDoc, # type: ignore
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None, # type: ignore
__cls_kwargs__: Dict[str, Any] = None, # type: ignore
__slots__: Optional[Tuple[str, ...]] = None,
**field_definitions: Any,
) -> Type['T_doc']:
"""
Dynamically create a subclass of BaseDoc. This is a wrapper around pydantic's create_model.
```python
from docarray.documents import Audio
from docarray.documents.helper import create_doc
from docarray.typing.tensor.audio import AudioNdArray
MyAudio = create_doc(
'MyAudio',
__base__=Audio,
title=(str, ...),
tensor=(AudioNdArray, ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, Audio)
```
:param __model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from, must be BaseDoc or its subclass
:param __module__: module of the created model
:param __validators__: a dict of method names and @validator class methods
:param __cls_kwargs__: a dict for class creation
:param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
:param field_definitions: fields of the model (or extra fields if a base is supplied)
in the format `<name>=(<type>, <default default>)` or `<name>=<default value>`
:return: the new Document class
"""
if not issubclass(__base__, BaseDoc):
raise ValueError(f'{type(__base__)} is not a BaseDoc or its subclass')
doc = create_model(
__model_name,
__config__=__config__,
__base__=__base__,
__module__=__module__,
__validators__=__validators__,
__cls_kwargs__=__cls_kwargs__,
__slots__=__slots__,
**field_definitions,
)
return doc
def create_doc_from_typeddict(
typeddict_cls: Type['TypedDict'], # type: ignore
**kwargs: Any,
):
"""
Create a subclass of BaseDoc based on the fields of a `TypedDict`. This is a wrapper around pydantic's create_model_from_typeddict.
---
```python
from typing_extensions import TypedDict
from docarray import BaseDoc
from docarray.documents import Audio
from docarray.documents.helper import create_doc_from_typeddict
from docarray.typing.tensor.audio import AudioNdArray
class MyAudio(TypedDict):
title: str
tensor: AudioNdArray
Doc = create_doc_from_typeddict(MyAudio, __base__=Audio)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, Audio)
```
---
:param typeddict_cls: TypedDict class to use for the new Document class
:param kwargs: extra arguments to pass to `create_model_from_typeddict`
:return: the new Document class
"""
if '__base__' in kwargs:
if not issubclass(kwargs['__base__'], BaseDoc):
raise ValueError(f'{kwargs["__base__"]} is not a BaseDoc or its subclass')
else:
kwargs['__base__'] = BaseDoc
doc = create_model_from_typeddict(typeddict_cls, **kwargs)
return doc
def create_doc_from_dict(model_name: str, data_dict: Dict[str, Any]) -> Type['T_doc']:
"""
Create a subclass of BaseDoc based on example data given as a dictionary.
In case the example contains None as a value,
corresponding field will be viewed as the type Any.
---
```python
import numpy as np
from docarray.documents import ImageDoc
from docarray.documents.helper import create_doc_from_dict
data_dict = {'image': ImageDoc(tensor=np.random.rand(3, 224, 224)), 'author': 'me'}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
```
---
:param model_name: Name of the new Document class
:param data_dict: Dictionary of field types to their corresponding values.
:return: the new Document class
"""
if not data_dict:
raise ValueError('`data_dict` should contain at least one item')
field_types = {
field: (type(value) if value else Any, ...)
for field, value in data_dict.items()
}
return create_doc(__model_name=model_name, **field_types) # type: ignore
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model, create_model_from_typeddict
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
if TYPE_CHECKING:
from pydantic.typing import AnyClassMethod
T_doc = TypeVar('T_doc', bound=BaseDoc)
def create_doc(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Type['T_doc'] = BaseDoc, # type: ignore
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None, # type: ignore
__cls_kwargs__: Dict[str, Any] = None, # type: ignore
__slots__: Optional[Tuple[str, ...]] = None,
**field_definitions: Any,
) -> Type['T_doc']:
"""
Dynamically create a subclass of BaseDoc. This is a wrapper around pydantic's create_model.
:param __model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from, must be BaseDoc or its subclass
:param __module__: module of the created model
:param __validators__: a dict of method names and @validator class methods
:param __cls_kwargs__: a dict for class creation
:param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
:param field_definitions: fields of the model (or extra fields if a base is supplied)
in the format `<name>=(<type>, <default default>)` or `<name>=<default value>`
:return: the new Document class
```python
from docarray.documents import Audio
from docarray.documents.helper import create_doc
from docarray.typing.tensor.audio import AudioNdArray
MyAudio = create_doc(
'MyAudio',
__base__=Audio,
title=(str, ...),
tensor=(AudioNdArray, ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, Audio)
```
"""
if not issubclass(__base__, BaseDoc):
raise ValueError(f'{type(__base__)} is not a BaseDoc or its subclass')
doc = create_model(
__model_name,
__config__=__config__,
__base__=__base__,
__module__=__module__,
__validators__=__validators__,
__cls_kwargs__=__cls_kwargs__,
__slots__=__slots__,
**field_definitions,
)
return doc
def create_doc_from_typeddict(
typeddict_cls: Type['TypedDict'], # type: ignore
**kwargs: Any,
):
"""
Create a subclass of BaseDoc based on the fields of a `TypedDict`. This is a wrapper around pydantic's create_model_from_typeddict.
:param typeddict_cls: TypedDict class to use for the new Document class
:param kwargs: extra arguments to pass to `create_model_from_typeddict`
:return: the new Document class
EXAMPLE USAGE
.. code-block:: python
from typing_extensions import TypedDict
from docarray import BaseDoc
from docarray.documents import Audio
from docarray.documents.helper import create_doc_from_typeddict
from docarray.typing.tensor.audio import AudioNdArray
class MyAudio(TypedDict):
title: str
tensor: AudioNdArray
Doc = create_doc_from_typeddict(MyAudio, __base__=Audio)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, Audio)
"""
if '__base__' in kwargs:
if not issubclass(kwargs['__base__'], BaseDoc):
raise ValueError(f'{kwargs["__base__"]} is not a BaseDoc or its subclass')
else:
kwargs['__base__'] = BaseDoc
doc = create_model_from_typeddict(typeddict_cls, **kwargs)
return doc
def create_doc_from_dict(model_name: str, data_dict: Dict[str, Any]) -> Type['T_doc']:
"""
Create a subclass of BaseDoc based on example data given as a dictionary.
In case the example contains None as a value,
corresponding field will be viewed as the type Any.
:param model_name: Name of the new Document class
:param data_dict: Dictionary of field types to their corresponding values.
:return: the new Document class
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray.documents import ImageDoc
from docarray.documents.helper import create_doc_from_dict
data_dict = {'image': ImageDoc(tensor=np.random.rand(3, 224, 224)), 'author': 'me'}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
"""
if not data_dict:
raise ValueError('`data_dict` should contain at least one item')
field_types = {
field: (type(value) if value else Any, ...)
for field, value in data_dict.items()
}
return create_doc(__model_name=model_name, **field_types) # type: ignore
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.3.17'
mmcv_maximum_version = '1.6.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.3.17'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'./centernet_tta.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
# The cropped images are padded into squares during training,
# but may be less than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args={{_base_.backend_args}},
to_float32=True),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}},
)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, the SGD effect is better
# than the Adam in the source code, so we use SGD default settings and
# if you use adam+lr5e-4, the map is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'./centernet_tta.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='CTResNetNeck',
in_channels=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=80,
in_channels=64,
feat_channels=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
# The cropped images are padded into squares during training,
# but may be less than crop_size.
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=16,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
# Based on the default settings of modern detectors, the SGD effect is better
# than the Adam in the source code, so we use SGD default settings and
# if you use adam+lr5e-4, the map is 29.1.
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 28
# learning policy
# Based on the default settings of modern detectors, we added warmup settings.
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[18, 24], # the real step is [18*5, 24*5]
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs) # the real epoch is 28*5=140
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.16.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.15.3'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseAnglELoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseAnglELoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
train_data_cfg['pipeline'] = [
x for x in train_data_cfg.pipeline if x['type'] not in skip_type
]
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
|
import argparse
import os
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
train_data_cfg['pipeline'] = [
x for x in train_data_cfg.pipeline if x['type'] not in skip_type
]
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
|
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.memgraph import MemgraphGraphStore
def test_memgraph_graph_store():
names_of_bases = [b.__name__ for b in MemgraphGraphStore.__bases__]
assert GraphStore.__name__ in names_of_bases
|
from unittest.mock import MagicMock, patch
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.memgraph import MemgraphGraphStore
@patch("llama_index.graph_stores.memgraph.MemgraphGraphStore")
def test_memgraph_graph_store(MockMemgraphGraphStore: MagicMock):
instance: MemgraphGraphStore = MockMemgraphGraphStore.return_value()
assert isinstance(instance, GraphStore)
|
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization classes for OpenAI GPT."""
from typing import Optional
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_openai import OpenAIGPTTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with
the following peculiarities:
- lower case all inputs
- uses BERT's BasicTokenizer for pre-BPE tokenization
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = OpenAIGPTTokenizer
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", **kwargs):
super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, **kwargs)
@property
def do_lower_case(self):
return True
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
__all__ = ["OpenAIGPTTokenizerFast"]
|
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization classes for OpenAI GPT."""
from typing import Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_openai import OpenAIGPTTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with
the following peculiarities:
- lower case all inputs
- uses BERT's BasicTokenizer for pre-BPE tokenization
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = OpenAIGPTTokenizer
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", **kwargs):
super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, **kwargs)
@property
def do_lower_case(self):
return True
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
__all__ = ["OpenAIGPTTokenizerFast"]
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import pickle_obj, unpickle_obj
@pytest.mark.parametrize('serializer', ["pickle", "joblib", "cloudpickle"])
def test_early_stopping_callback_is_picklable(serializer, tmp_path):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
tmp_file = tmp_path / "early_stopping.pkl"
pickle_obj(
obj=callback,
filepath=tmp_file,
serializer=serializer
)
callback_from_disk = unpickle_obj(
filepath=tmp_file,
serializer=serializer
)
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
@pytest.mark.parametrize('serializer', ["pickle", "joblib", "cloudpickle"])
def test_log_evaluation_callback_is_picklable(serializer, tmp_path):
periods = 42
callback = lgb.log_evaluation(period=periods)
tmp_file = tmp_path / "log_evaluation.pkl"
pickle_obj(
obj=callback,
filepath=tmp_file,
serializer=serializer
)
callback_from_disk = unpickle_obj(
filepath=tmp_file,
serializer=serializer
)
assert callback.period == callback_from_disk.period
assert callback.period == periods
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import pickle_obj, unpickle_obj
@pytest.mark.parametrize('serializer', ["pickle", "joblib", "cloudpickle"])
def test_early_stopping_callback_is_picklable(serializer, tmp_path):
callback = lgb.early_stopping(stopping_rounds=5)
tmp_file = tmp_path / "early_stopping.pkl"
pickle_obj(
obj=callback,
filepath=tmp_file,
serializer=serializer
)
callback_from_disk = unpickle_obj(
filepath=tmp_file,
serializer=serializer
)
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.