# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
from mmengine.dist import is_main_process
from mmengine.fileio import get_local_path, list_from_file
from mmengine.utils import ProgressBar
from mmdet.registry import DATASETS
from mmdet.utils.typing_utils import List, Union
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
METAINFO = {'classes': ('face', ), 'palette': [(0, 255, 0)]}
def load_data_list(self) -> List[dict]:
"""Load annotation from XML style ann_file.
Returns:
list[dict]: Annotation info from XML file.
"""
assert self._metainfo.get('classes', None) is not None, \
            'classes in `XMLDataset` cannot be None.'
self.cat2label = {
cat: i
for i, cat in enumerate(self._metainfo['classes'])
}
data_list = []
img_ids = list_from_file(self.ann_file, backend_args=self.backend_args)
# loading process takes around 10 mins
if is_main_process():
prog_bar = ProgressBar(len(img_ids))
for img_id in img_ids:
raw_img_info = {}
raw_img_info['img_id'] = img_id
raw_img_info['file_name'] = f'{img_id}.jpg'
parsed_data_info = self.parse_data_info(raw_img_info)
data_list.append(parsed_data_info)
if is_main_process():
prog_bar.update()
return data_list
def parse_data_info(self, img_info: dict) -> Union[dict, List[dict]]:
"""Parse raw annotation to target format.
Args:
            img_info (dict): Raw image information, usually including
                `img_id`, `file_name`, and `xml_path`.
Returns:
Union[dict, List[dict]]: Parsed annotation.
"""
data_info = {}
img_id = img_info['img_id']
xml_path = osp.join(self.data_prefix['img'], 'Annotations',
f'{img_id}.xml')
data_info['img_id'] = img_id
data_info['xml_path'] = xml_path
# deal with xml file
with get_local_path(
xml_path, backend_args=self.backend_args) as local_path:
raw_ann_info = ET.parse(local_path)
root = raw_ann_info.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
img_path = osp.join(self.data_prefix['img'], folder,
img_info['file_name'])
data_info['img_path'] = img_path
data_info['height'] = height
data_info['width'] = width
# Coordinates are in range [0, width - 1 or height - 1]
data_info['instances'] = self._parse_instance_info(
raw_ann_info, minus_one=False)
return data_info
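# --- Hedged usage sketch (addition, not part of the upstream file) ---
# Assumes a VOC-style WIDER Face layout produced by the conversion scripts linked
# above: a text file listing image ids plus an `Annotations/` folder of per-image
# XMLs next to the images. All paths below are illustrative.
if __name__ == '__main__':
    dataset = WIDERFaceDataset(
        data_root='data/WIDERFace/',
        ann_file='train.txt',                  # one image id per line
        data_prefix=dict(img='WIDER_train/'),  # holds the images and Annotations/
        pipeline=[],                           # plug in the usual mmdet transforms here
    )
    print(len(dataset), dataset[0]['img_path'])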
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
from mmengine.fileio import list_from_file
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
METAINFO = {'classes': ('face', ), 'palette': [(0, 255, 0)]}
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|
from enum import Enum
from typing import Callable, Union
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
dot_score,
euclidean_sim,
manhattan_sim,
pairwise_cos_sim,
pairwise_dot_score,
pairwise_euclidean_sim,
pairwise_manhattan_sim,
)
class SimilarityFunction(Enum):
"""
Enum class for supported similarity functions. The following functions are supported:
- ``SimilarityFunction.COSINE`` (``"cosine"``): Cosine similarity
- ``SimilarityFunction.DOT_PRODUCT`` (``"dot"``, ``dot_product``): Dot product similarity
- ``SimilarityFunction.EUCLIDEAN`` (``"euclidean"``): Euclidean distance
- ``SimilarityFunction.MANHATTAN`` (``"manhattan"``): Manhattan distance
"""
COSINE = "cosine"
DOT_PRODUCT = "dot"
DOT = "dot" # Alias for DOT_PRODUCT
EUCLIDEAN = "euclidean"
MANHATTAN = "manhattan"
@staticmethod
def to_similarity_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function name or enum value to the corresponding similarity function.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The corresponding similarity function.
Raises:
ValueError: If the provided function is not supported.
Example:
>>> similarity_fn = SimilarityFunction.to_similarity_fn("cosine")
>>> similarity_scores = similarity_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([[0.3952, 0.0554],
[0.0992, 0.1570]])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def to_similarity_pairwise_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function into a pairwise similarity function.
The pairwise similarity function returns the diagonal vector from the similarity matrix, i.e. it only
computes the similarity(a[i], b[i]) for each i in the range of the input tensors, rather than
computing the similarity between all pairs of a and b.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The pairwise similarity function.
Raises:
ValueError: If the provided similarity function is not supported.
Example:
>>> pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
>>> similarity_scores = pairwise_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([0.3952, 0.1570])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return pairwise_cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return pairwise_dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return pairwise_manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return pairwise_euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def possible_values():
"""
Returns a list of possible values for the SimilarityFunction enum.
Returns:
list: A list of possible values for the SimilarityFunction enum.
Example:
>>> possible_values = SimilarityFunction.possible_values()
>>> possible_values
['cosine', 'dot', 'euclidean', 'manhattan']
"""
return [m.value for m in SimilarityFunction]
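# --- Hedged usage sketch (addition): resolving and applying a similarity function.
# The embeddings below are random toy tensors; shapes and values are illustrative.
if __name__ == '__main__':
    import torch

    embeddings1 = torch.randn(2, 4)
    embeddings2 = torch.randn(2, 4)

    sim_fn = SimilarityFunction.to_similarity_fn(SimilarityFunction.COSINE)
    print(sim_fn(embeddings1, embeddings2))       # full 2x2 similarity matrix

    pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
    print(pairwise_fn(embeddings1, embeddings2))  # diagonal only, shape (2,)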
|
from enum import Enum
from typing import Callable, Union
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
manhattan_sim,
euclidean_sim,
dot_score,
pairwise_cos_sim,
pairwise_manhattan_sim,
pairwise_euclidean_sim,
pairwise_dot_score,
)
class SimilarityFunction(Enum):
"""
Enum class for supported similarity functions. The following functions are supported:
- ``SimilarityFunction.COSINE`` (``"cosine"``): Cosine similarity
- ``SimilarityFunction.DOT_PRODUCT`` (``"dot"``, ``dot_product``): Dot product similarity
- ``SimilarityFunction.EUCLIDEAN`` (``"euclidean"``): Euclidean distance
- ``SimilarityFunction.MANHATTAN`` (``"manhattan"``): Manhattan distance
"""
COSINE = "cosine"
DOT_PRODUCT = "dot"
DOT = "dot" # Alias for DOT_PRODUCT
EUCLIDEAN = "euclidean"
MANHATTAN = "manhattan"
@staticmethod
def to_similarity_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function name or enum value to the corresponding similarity function.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The corresponding similarity function.
Raises:
ValueError: If the provided function is not supported.
Example:
>>> similarity_fn = SimilarityFunction.to_similarity_fn("cosine")
>>> similarity_scores = similarity_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([[0.3952, 0.0554],
[0.0992, 0.1570]])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def to_similarity_pairwise_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function into a pairwise similarity function.
The pairwise similarity function returns the diagonal vector from the similarity matrix, i.e. it only
computes the similarity(a[i], b[i]) for each i in the range of the input tensors, rather than
computing the similarity between all pairs of a and b.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The pairwise similarity function.
Raises:
ValueError: If the provided similarity function is not supported.
Example:
>>> pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
>>> similarity_scores = pairwise_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([0.3952, 0.1570])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return pairwise_cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return pairwise_dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return pairwise_manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return pairwise_euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def possible_values():
"""
Returns a list of possible values for the SimilarityFunction enum.
Returns:
list: A list of possible values for the SimilarityFunction enum.
Example:
>>> possible_values = SimilarityFunction.possible_values()
>>> possible_values
['cosine', 'dot', 'euclidean', 'manhattan']
"""
return [m.value for m in SimilarityFunction]
|
from __future__ import annotations
import logging
import torch
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
logger = logging.getLogger(__name__)
class WordWeights(Module):
"""This model can weight word embeddings, for example, with idf-values."""
config_keys: list[str] = ["vocab", "word_weights", "unknown_word_weight"]
def __init__(self, vocab: list[str], word_weights: dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
            word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
                by this float value. The tokens in word_weights need not match the vocab exactly (it may contain more or fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super().__init__()
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
f"{num_unknown_words} of {len(vocab)} words without a weighting value. Set weight to {unknown_word_weight}"
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
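# --- Hedged usage sketch (addition): weighting token embeddings inside a pipeline.
# Assumes idf-like weights are known for part of the tokenizer vocabulary; the
# model name and toy weights below are illustrative, and the module order follows
# the usual token-embedding -> weighting -> pooling pattern.
if __name__ == '__main__':
    from sentence_transformers import SentenceTransformer, models

    word_embedding_model = models.Transformer('bert-base-uncased')
    vocab = list(word_embedding_model.tokenizer.get_vocab().keys())
    word_weights = WordWeights(
        vocab=vocab,
        word_weights={'the': 0.1, 'sentence': 2.5},  # toy idf-style weights
        unknown_word_weight=1.0,
    )
    pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
    model = SentenceTransformer(modules=[word_embedding_model, word_weights, pooling_model])
    print(model.encode(['A short example sentence.']).shape)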
|
from __future__ import annotations
import json
import logging
import os
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: list[str], word_weights: dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
            word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
                by this float value. The tokens in word_weights need not match the vocab exactly (it may contain more or fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super().__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
f"{num_unknown_words} of {len(vocab)} words without a weighting value. Set weight to {unknown_word_weight}"
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='Audio')
class Audio(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`Audio.url`), an AudioTensor
(`Audio.tensor`), and an AnyEmbedding (`Audio.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Audio
# use it directly
audio = Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Audio, Text
from typing import Optional
# extend it
class MyAudio(Audio):
name: Optional[Text]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = Text(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Audio, Text
# compose it
        class MultiModalDoc(BaseDocument):
audio: Audio
text: Text
mmdoc = MultiModalDoc(
audio=Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='Audio')
class Audio(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`Audio.url`), an AudioTensor
(`Audio.tensor`), and an AnyEmbedding (`Audio.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Audio
# use it directly
audio = Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Audio, Text
from typing import Optional
# extend it
class MyAudio(Audio):
name: Optional[Text]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = Text(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Audio, Text
# compose it
        class MultiModalDoc(BaseDocument):
audio: Audio
text: Text
mmdoc = MultiModalDoc(
audio=Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor = mmdoc.audio.bytes.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[AudioBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
import subprocess
import sys
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
@pytest.mark.parametrize(
"import_path",
[
pytest.param(
"from langchain_core.messages import HumanMessage", id="HumanMessage"
),
pytest.param("from langchain_core.tools import tool", id="tool"),
pytest.param(
"from langchain_core.callbacks import CallbackManager", id="CallbackManager"
),
pytest.param("from langchain_core.runnables import Runnable", id="Runnable"),
pytest.param(
"from langchain_core.language_models import BaseChatModel",
id="BaseChatModel",
),
pytest.param(
"from langchain_core.prompts import ChatPromptTemplate",
id="ChatPromptTemplate",
),
pytest.param("from langchain_core.documents import Document", id="Document"),
pytest.param(
"from langchain_core.vectorstores import InMemoryVectorStore",
id="InMemoryVectorStore",
),
pytest.param(
"from langchain_core.runnables import RunnableLambda",
id="RunnableLambda",
),
pytest.param(
"from langchain_core.tracers import LangChainTracer",
id="LangChainTracer",
),
pytest.param(
"from langchain_core.output_parsers import PydanticOutputParser",
id="PydanticOutputParser",
),
pytest.param(
"from langchain_core.rate_limiters import InMemoryRateLimiter",
id="InMemoryRateLimiter",
),
],
)
@pytest.mark.benchmark
def test_import_time(benchmark: BenchmarkFixture, import_path: str) -> None:
@benchmark # type: ignore[misc]
def import_in_subprocess() -> None:
subprocess.run([sys.executable, "-c", import_path], check=False)
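# Hedged note (addition): with pytest-benchmark installed, these import-time
# benchmarks can be collected with something like `pytest --benchmark-only`
# against this file; the exact invocation depends on the project's test setup.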
|
import subprocess
import sys
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
@pytest.mark.parametrize(
"import_path",
[
pytest.param(
"from langchain_core.messages import HumanMessage", id="HumanMessage"
),
pytest.param("from langchain_core.tools import tool", id="tool"),
pytest.param(
"from langchain_core.callbacks import CallbackManager", id="CallbackManager"
),
pytest.param("from langchain_core.runnables import Runnable", id="Runnable"),
pytest.param(
"from langchain_core.language_models import BaseChatModel",
id="BaseChatModel",
),
pytest.param(
"from langchain_core.prompts import ChatPromptTemplate",
id="ChatPromptTemplate",
),
pytest.param("from langchain_core.documents import Document", id="Document"),
pytest.param(
"from langchain_core.vectorstores import InMemoryVectorStore",
id="InMemoryVectorStore",
),
pytest.param(
"from langchain_core.runnables import RunnableLambda",
id="RunnableLambda",
),
pytest.param(
"from langchain_core.tracers import LangChainTracer",
id="LangChainTracer",
),
pytest.param(
"from langchain_core.output_parsers import PydanticOutputParser",
id="PydanticOutputParser",
),
pytest.param(
"from langchain_core.rate_limiters import InMemoryRateLimiter",
id="InMemoryRateLimiter",
),
],
)
@pytest.mark.benchmark
def test_import_time(benchmark: BenchmarkFixture, import_path: str) -> None:
@benchmark
def import_in_subprocess() -> None:
subprocess.run([sys.executable, "-c", import_path], check=False)
|
import gzip
import logging
import os
import sys
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, datasets, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
result_folder = "output/askubuntu-tsdae-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
batch_size = 8
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(sentence)
logging.info(f"{len(train_sentences)} train sentences")
################# Initialize an SBERT model #################
model_name = sys.argv[1] if len(sys.argv) >= 2 else "bert-base-uncased"
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
logging.info("Dev performance before training")
dev_evaluator(model)
total_steps = 20000
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=1000,
epochs=1,
steps_per_epoch=total_steps,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
output_path=result_folder,
show_progress_bar=True,
)
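# Hedged note (addition): the script reads an optional Hugging Face model name from
# its first CLI argument (see `sys.argv[1]` above), e.g.
#   python train_askubuntu_tsdae.py bert-base-uncased
# The file name here is illustrative; without an argument it falls back to
# `bert-base-uncased`.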
|
import gzip
import logging
import os
import sys
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, datasets, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
result_folder = "output/askubuntu-tsdae-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
batch_size = 8
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(sentence)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
model_name = sys.argv[1] if len(sys.argv) >= 2 else "bert-base-uncased"
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
logging.info("Dev performance before training")
dev_evaluator(model)
total_steps = 20000
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=1000,
epochs=1,
steps_per_epoch=total_steps,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
output_path=result_folder,
show_progress_bar=True,
)
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
        The async method that sets up the runtime.
        It sets up the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
args=self.args,
logger=self.logger,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of the server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
        The async method that sets up the runtime.
        It sets up the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
args=self.args,
logger=self.logger,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of the server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from .conftest import OneTestEvent
class StreamingWorkflow(Workflow):
@step
async def chat(self, ctx: Context, ev: StartEvent) -> StopEvent:
async def stream_messages():
resp = "Paul Graham is a British-American computer scientist, entrepreneur, vc, and writer."
for word in resp.split():
yield word
async for w in stream_messages():
ctx.write_event_to_stream(Event(msg=w))
return StopEvent(result=None)
@pytest.mark.asyncio
async def test_e2e():
wf = StreamingWorkflow()
r = wf.run()
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await r
@pytest.mark.asyncio
async def test_too_many_runs():
wf = StreamingWorkflow()
r = asyncio.gather(wf.run(), wf.run())
with pytest.raises(
WorkflowRuntimeError,
match="This workflow has multiple concurrent runs in progress and cannot stream events",
):
async for ev in wf.stream_events():
pass
await r
@pytest.mark.asyncio
async def test_task_raised():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
raise ValueError("The step raised an error!")
wf = DummyWorkflow()
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(
WorkflowRuntimeError, match="Error in step 'step': The step raised an error!"
):
await r
@pytest.mark.asyncio
async def test_task_timeout():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
await asyncio.sleep(2)
return StopEvent()
wf = DummyWorkflow(timeout=1)
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(WorkflowTimeoutError, match="Operation timed out"):
await r
@pytest.mark.asyncio
async def test_multiple_sequential_streams():
wf = StreamingWorkflow()
r = wf.run()
# stream 1
async for _ in r.stream_events():
pass
await r
# stream 2 -- should not raise an error
r = wf.run()
async for _ in r.stream_events():
pass
await r
@pytest.mark.asyncio
async def test_multiple_ongoing_streams():
wf = StreamingWorkflow()
stream_1 = wf.run()
stream_2 = wf.run()
async for ev in stream_1.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
async for ev in stream_2.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await asyncio.gather(stream_1, stream_2)
@pytest.mark.asyncio
async def test_resume_streams():
class CounterWorkflow(Workflow):
@step
async def count(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(Event(msg="hello!"))
cur_count = await ctx.get("cur_count", default=0)
await ctx.set("cur_count", cur_count + 1)
return StopEvent(result="done")
wf = CounterWorkflow()
handler_1 = wf.run()
async for _ in handler_1.stream_events():
pass
await handler_1
assert handler_1.ctx
handler_2 = wf.run(ctx=handler_1.ctx)
async for _ in handler_2.stream_events():
pass
await handler_2
assert handler_2.ctx
assert await handler_2.ctx.get("cur_count") == 2
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from .conftest import OneTestEvent
class StreamingWorkflow(Workflow):
@step
async def chat(self, ctx: Context, ev: StartEvent) -> StopEvent:
async def stream_messages():
resp = "Paul Graham is a British-American computer scientist, entrepreneur, vc, and writer."
for word in resp.split():
yield word
async for w in stream_messages():
ctx.write_event_to_stream(Event(msg=w))
return StopEvent(result=None)
@pytest.mark.asyncio()
async def test_e2e():
wf = StreamingWorkflow()
r = wf.run()
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await r
@pytest.mark.asyncio()
async def test_too_many_runs():
wf = StreamingWorkflow()
r = asyncio.gather(wf.run(), wf.run())
with pytest.raises(
WorkflowRuntimeError,
match="This workflow has multiple concurrent runs in progress and cannot stream events",
):
async for ev in wf.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_task_raised():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
raise ValueError("The step raised an error!")
wf = DummyWorkflow()
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(
WorkflowRuntimeError, match="Error in step 'step': The step raised an error!"
):
await r
@pytest.mark.asyncio()
async def test_task_timeout():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
await asyncio.sleep(2)
return StopEvent()
wf = DummyWorkflow(timeout=1)
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(WorkflowTimeoutError, match="Operation timed out"):
await r
@pytest.mark.asyncio()
async def test_multiple_sequential_streams():
wf = StreamingWorkflow()
r = wf.run()
# stream 1
async for _ in r.stream_events():
pass
await r
# stream 2 -- should not raise an error
r = wf.run()
async for _ in r.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_multiple_ongoing_streams():
wf = StreamingWorkflow()
stream_1 = wf.run()
stream_2 = wf.run()
async for ev in stream_1.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
async for ev in stream_2.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await asyncio.gather(stream_1, stream_2)
@pytest.mark.asyncio()
async def test_resume_streams():
class CounterWorkflow(Workflow):
@step
async def count(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(Event(msg="hello!"))
cur_count = await ctx.get("cur_count", default=0)
await ctx.set("cur_count", cur_count + 1)
return StopEvent(result="done")
wf = CounterWorkflow()
handler_1 = wf.run()
async for _ in handler_1.stream_events():
pass
await handler_1
assert handler_1.ctx
handler_2 = wf.run(ctx=handler_1.ctx)
async for _ in handler_2.stream_events():
pass
await handler_2
assert handler_2.ctx
assert await handler_2.ctx.get("cur_count") == 2
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import pytest
from mmengine.hooks import ParamSchedulerHook
from mmengine.optim import _ParamScheduler
class TestParamSchedulerHook:
error_msg = ('runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler')
def test_after_train_iter(self):
# runner.param_schedulers should be a list or dict
with pytest.raises(TypeError, match=self.error_msg):
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
runner.param_schedulers = scheduler
hook.after_train_iter(runner, 0)
scheduler.step.assert_called()
# runner.param_schedulers is a list of schedulers
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
runner.param_schedulers = [scheduler]
hook.after_train_iter(runner, 0)
scheduler.step.assert_called()
# runner.param_schedulers is a dict containing list of schedulers
scheduler1 = Mock()
scheduler1.step = Mock()
scheduler1.by_epoch = False
scheduler2 = Mock()
scheduler2.step = Mock()
scheduler2.by_epoch = False
runner.param_schedulers = dict(key1=[scheduler1], key2=[scheduler2])
hook.after_train_epoch(runner)
hook.after_train_iter(runner, 0)
scheduler1.step.assert_called()
scheduler2.step.assert_called()
def test_after_train_epoch(self):
# runner.param_schedulers should be a list or dict
with pytest.raises(TypeError, match=self.error_msg):
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
runner.param_schedulers = scheduler
hook.after_train_epoch(runner)
scheduler.step.assert_called()
# runner.param_schedulers is a list of schedulers
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
runner.param_schedulers = [scheduler]
hook.after_train_epoch(runner)
scheduler.step.assert_called()
# runner.param_schedulers is a dict containing list of schedulers
scheduler1 = Mock()
scheduler1.step = Mock()
scheduler1.by_epoch = True
scheduler2 = Mock()
scheduler2.step = Mock()
scheduler2.by_epoch = True
runner.param_schedulers = dict(key1=[scheduler1], key2=[scheduler2])
hook.after_train_epoch(runner)
scheduler1.step.assert_called()
scheduler2.step.assert_called()
def test_after_val_epoch(self):
metrics = dict(loss=1.0)
# mock super _ParamScheduler class
class MockParamScheduler(_ParamScheduler):
def __init__(self):
pass
def _get_value(self):
pass
# runner.param_schedulers should be a list or dict
with pytest.raises(TypeError, match=self.error_msg):
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
scheduler.need_val_args = True
runner.param_schedulers = scheduler
hook.after_val_epoch(runner, metrics)
# runner.param_schedulers is a list of schedulers
hook = ParamSchedulerHook()
runner = Mock()
scheduler = MockParamScheduler()
scheduler.step = Mock()
scheduler.by_epoch = True
scheduler.need_val_args = True
runner.param_schedulers = [scheduler]
hook.after_val_epoch(runner, metrics)
scheduler.step.assert_called_with(metrics)
# runner.param_schedulers is a dict containing list of schedulers
scheduler1 = MockParamScheduler()
scheduler1.step = Mock()
scheduler1.by_epoch = True
scheduler1.need_val_args = True
scheduler2 = MockParamScheduler()
scheduler2.step = Mock()
scheduler2.by_epoch = True
scheduler2.need_val_args = True
runner.param_schedulers = dict(key1=[scheduler1], key2=[scheduler2])
hook.after_val_epoch(runner, metrics)
scheduler1.step.assert_called_with(metrics)
scheduler2.step.assert_called_with(metrics)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import pytest
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
error_msg = ('runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler')
def test_after_iter(self):
# runner.param_schedulers should be a list or dict
with pytest.raises(TypeError, match=self.error_msg):
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
runner.param_schedulers = scheduler
hook.after_train_iter(runner, 0)
scheduler.step.assert_called()
# runner.param_schedulers is a list of schedulers
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
runner.param_schedulers = [scheduler]
hook.after_train_iter(runner, 0)
scheduler.step.assert_called()
# runner.param_schedulers is a dict containing list of schedulers
scheduler1 = Mock()
scheduler1.step = Mock()
scheduler1.by_epoch = False
scheduler2 = Mock()
scheduler2.step = Mock()
scheduler2.by_epoch = False
runner.param_schedulers = dict(key1=[scheduler1], key2=[scheduler2])
hook.after_train_epoch(runner)
hook.after_train_iter(runner, 0)
scheduler2.step.assert_called()
def test_after_epoch(self):
# runner.param_schedulers should be a list or dict
with pytest.raises(TypeError, match=self.error_msg):
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
runner.param_schedulers = scheduler
hook.after_train_iter(runner, 0)
scheduler.step.assert_called()
# runner.param_schedulers is a list of schedulers
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
runner.param_schedulers = [scheduler]
hook.after_train_epoch(runner)
scheduler.step.assert_called()
# runner.param_schedulers is a dict containing list of schedulers
scheduler1 = Mock()
scheduler1.step = Mock()
scheduler1.by_epoch = True
scheduler2 = Mock()
scheduler2.step = Mock()
scheduler2.by_epoch = True
runner.param_schedulers = dict(key1=[scheduler1], key2=[scheduler2])
hook.after_train_epoch(runner)
scheduler1.step.assert_called()
scheduler2.step.assert_called()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
vertices, faces = url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
assert vertices.shape[1] == 3
assert faces.shape[1] == 3
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='Mesh3DUrl'):
parse_obj_as(Mesh3DUrl, path_to_file)
else:
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
vertices, faces = url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
assert vertices.shape[1] == 3
assert faces.shape[1] == 3
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='Mesh3DUrl'):
parse_obj_as(Mesh3DUrl, path_to_file)
else:
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
    :class:`LaserEncoder` is an encoder based on Facebook Research's LASER
(Language-Agnostic SEntence Representations) to compute multilingual
sentence embeddings: https://github.com/facebookresearch/LASER
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
language: str = 'en',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE.``
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
        :param language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
        :param traversal_paths: traversal path of the Documents (e.g. 'r', 'c')
:param batch_size: size of each batch
:param device: Device string ('cpu'/'cuda'/'cuda:2')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.device = device
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.language = language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': self.device == 'cpu'},
)
self.device = torch.device(device)
self.model.bpeSentenceEmbedding.encoder.encoder.to(self.device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
        :param parameters: dictionary to define the ``traversal_paths``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs is None:
return
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=parameters.get('batch_size', self.batch_size),
)
for document_batch in document_batches_generator:
text_batch = document_batch.texts
language = parameters.get('language', self.language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
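# --- Illustrative usage sketch, not part of the original executor. ---
# A minimal, hedged example of driving this encoder directly, assuming the
# LASER model files are already on disk; the document texts are placeholders.
if __name__ == '__main__':
    from jina import Document, DocumentArray

    encoder = LaserEncoder(download_data=False, language='en')
    example_docs = DocumentArray(
        [Document(text='hello world'), Document(text='hola mundo')]
    )
    encoder.encode(docs=example_docs, parameters={})
    for doc in example_docs:
        # each document with non-empty text now carries a LASER embedding
        print(doc.text, doc.embedding.shape)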
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
    :class:`LaserEncoder` is an encoder based on Facebook Research's LASER
(Language-Agnostic SEntence Representations) to compute multilingual
sentence embeddings: https://github.com/facebookresearch/LASER
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
language: str = 'en',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
            ``Laser.DEFAULT_BPE_CODES_FILE``.
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
        :param language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
        :param traversal_paths: traversal paths of the Documents (e.g. 'r', 'c')
:param batch_size: size of each batch
:param device: Device string ('cpu'/'cuda'/'cuda:2')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.device = device
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.language = language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': self.device == 'cpu'},
)
self.device = torch.device(device)
self.model.bpeSentenceEmbedding.encoder.encoder.to(self.device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
        :param parameters: dictionary to define the ``traversal_paths``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs is None:
return
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
filter_fn=lambda doc:len(doc.text)>0
).batch(
batch_size=parameters.get('batch_size', self.batch_size),
)
for document_batch in document_batches_generator:
text_batch = document_batch.texts
language = parameters.get('language', self.language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
|
import os
import random
import time
from typing import Dict, OrderedDict
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 100
class TagMatchMerger(Executor):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
if docs_matrix:
# noinspection PyTypeHints
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
top_k = parameters.get('top_k')
if top_k:
top_k = int(top_k)
for doc in results.values():
doc.matches = sorted(
doc.matches,
key=lambda m: m.scores['l2'].value,
reverse=True,
)[:top_k]
docs = DocumentArray(list(results.values()))
return docs
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class FaissTaggingFileSearcher(FaissLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim]).astype(dtype=np.float32)
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
@pytest.mark.parametrize('num_shards', (2, 3, 7))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
|
import os
import random
import time
from typing import Dict, OrderedDict
import numpy as np
import pytest
from jina import Document, Flow, DocumentArray, requests, Executor
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.searcher.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 100
class TagMatchMerger(Executor):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
if docs_matrix:
# noinspection PyTypeHints
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
top_k = parameters.get('top_k')
if top_k:
top_k = int(top_k)
for doc in results.values():
doc.matches = sorted(
doc.matches,
key=lambda m: m.scores['l2'].value,
reverse=True,
)[:top_k]
docs = DocumentArray(list(results.values()))
return docs
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class FaissTaggingFileSearcher(FaissLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim]).astype(dtype=np.float32)
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
@pytest.mark.parametrize('num_shards', (2, 3, 7))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RescalingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_rescaling_basics(self):
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.requires_trainable_backend
def test_rescaling_dtypes(self):
# int scale
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 2, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int offset
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0, "offset": 2},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int inputs
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
input_dtype="int16",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_rescaling_correctness(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
out = layer(x)
self.assertAllClose(out, x / 255 + 0.5)
def test_tf_data_compatibility(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
next(iter(ds)).numpy()
def test_rescaling_with_channels_first_and_vector_scale(self):
config = backend.image_data_format()
backend.set_image_data_format("channels_first")
layer = layers.Rescaling(
scale=[1.0 / 255, 1.5 / 255, 2.0 / 255], offset=0.5
)
x = np.random.random((2, 3, 10, 10)) * 255
layer(x)
backend.set_image_data_format(config)
@pytest.mark.requires_trainable_backend
def test_numpy_args(self):
# https://github.com/keras-team/keras/issues/20072
self.run_layer_test(
layers.Rescaling,
init_kwargs={
"scale": np.array(1.0 / 255.0),
"offset": np.array(0.5),
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.requires_trainable_backend
def test_rescaling_broadcast_output_shape(self):
self.run_layer_test(
layers.Rescaling,
init_kwargs={
"scale": [1.0, 1.0],
"offset": [0.0, 0.0],
},
input_shape=(2, 1),
expected_output_shape=(2, 2),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RescalingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_rescaling_basics(self):
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.requires_trainable_backend
def test_rescaling_dtypes(self):
# int scale
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 2, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int offset
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0, "offset": 2},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int inputs
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
input_dtype="int16",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_rescaling_correctness(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
out = layer(x)
self.assertAllClose(out, x / 255 + 0.5)
def test_tf_data_compatibility(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
next(iter(ds)).numpy()
def test_rescaling_with_channels_first_and_vector_scale(self):
config = backend.image_data_format()
backend.set_image_data_format("channels_first")
layer = layers.Rescaling(
scale=[1.0 / 255, 1.5 / 255, 2.0 / 255], offset=0.5
)
x = np.random.random((2, 3, 10, 10)) * 255
layer(x)
backend.set_image_data_format(config)
@pytest.mark.requires_trainable_backend
def test_numpy_args(self):
# https://github.com/keras-team/keras/issues/20072
self.run_layer_test(
layers.Rescaling,
init_kwargs={
"scale": np.array(1.0 / 255.0),
"offset": np.array(0.5),
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
    BFP takes multi-level features as inputs, gathers them into a single one,
    then refines the gathered feature and scatters the refined results to
multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
refine_type (str): Type of the refine op, currently support
[None, 'conv', 'non_local'].
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(BFP, self).__init__(init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
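# --- Illustrative usage sketch, not part of the original module. ---
# A hedged, minimal check of the gather/refine/scatter behaviour described in
# the class docstring: every output level keeps its input resolution. The
# channel count and spatial sizes below are made up for illustration only.
if __name__ == '__main__':
    import torch

    bfp = BFP(in_channels=8, num_levels=4, refine_level=1, refine_type='conv')
    feats = [torch.randn(1, 8, 64 // 2 ** i, 64 // 2 ** i) for i in range(4)]
    outs = bfp(feats)
    # each refined output has the same shape as the corresponding input level
    assert all(o.shape == f.shape for o, f in zip(outs, feats))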
|
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
    BFP takes multi-level features as inputs, gathers them into a single one,
    then refines the gathered feature and scatters the refined results to
multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
refine_type (str): Type of the refine op, currently support
[None, 'conv', 'non_local'].
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(BFP, self).__init__(init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
|
import json
import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.metrics import (
check_precision_score,
check_quantile_error,
run_pr_auc_binary,
run_pr_auc_ltr,
run_pr_auc_multi,
run_roc_auc_binary,
run_roc_auc_multi,
)
class TestGPUEvalMetrics:
@pytest.mark.parametrize("n_samples", [4, 100, 1000])
def test_roc_auc_binary(self, n_samples: int) -> None:
run_roc_auc_binary("hist", n_samples, "cuda")
@pytest.mark.parametrize(
"n_samples,weighted", [(4, False), (100, False), (1000, False), (1000, True)]
)
def test_roc_auc_multi(self, n_samples: int, weighted: bool) -> None:
run_roc_auc_multi("hist", n_samples, weighted, "cuda")
@pytest.mark.parametrize("n_samples", [4, 100, 1000])
def test_roc_auc_ltr(self, n_samples):
import numpy as np
rng = np.random.RandomState(1994)
n_samples = n_samples
n_features = 10
X = rng.randn(n_samples, n_features)
y = rng.randint(0, 16, size=n_samples)
group = np.array([n_samples // 2, n_samples // 2])
Xy = xgboost.DMatrix(X, y, group=group)
booster = xgboost.train(
{"tree_method": "hist", "eval_metric": "auc", "objective": "rank:ndcg"},
Xy,
num_boost_round=10,
)
cpu_auc = float(booster.eval(Xy).split(":")[1])
booster.set_param({"device": "cuda:0"})
assert (
json.loads(booster.save_config())["learner"]["generic_param"]["device"]
== "cuda:0"
)
gpu_auc = float(booster.eval(Xy).split(":")[1])
assert (
json.loads(booster.save_config())["learner"]["generic_param"]["device"]
== "cuda:0"
)
np.testing.assert_allclose(cpu_auc, gpu_auc)
def test_pr_auc_binary(self) -> None:
run_pr_auc_binary("hist", "cuda")
def test_pr_auc_multi(self) -> None:
run_pr_auc_multi("hist", "cuda")
def test_pr_auc_ltr(self) -> None:
run_pr_auc_ltr("hist", "cuda")
def test_precision_score(self) -> None:
check_precision_score("hist", "cuda")
@pytest.mark.skipif(**tm.no_sklearn())
def test_quantile_error(self) -> None:
check_quantile_error("hist", "cuda")
|
import json
import sys
import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.metrics import check_precision_score, check_quantile_error
sys.path.append("tests/python")
import test_eval_metrics as test_em # noqa
class TestGPUEvalMetrics:
cpu_test = test_em.TestEvalMetrics()
@pytest.mark.parametrize("n_samples", [4, 100, 1000])
def test_roc_auc_binary(self, n_samples):
self.cpu_test.run_roc_auc_binary("gpu_hist", n_samples)
@pytest.mark.parametrize(
"n_samples,weighted", [(4, False), (100, False), (1000, False), (1000, True)]
)
def test_roc_auc_multi(self, n_samples, weighted):
self.cpu_test.run_roc_auc_multi("gpu_hist", n_samples, weighted)
@pytest.mark.parametrize("n_samples", [4, 100, 1000])
def test_roc_auc_ltr(self, n_samples):
import numpy as np
rng = np.random.RandomState(1994)
n_samples = n_samples
n_features = 10
X = rng.randn(n_samples, n_features)
y = rng.randint(0, 16, size=n_samples)
group = np.array([n_samples // 2, n_samples // 2])
Xy = xgboost.DMatrix(X, y, group=group)
booster = xgboost.train(
{"tree_method": "hist", "eval_metric": "auc", "objective": "rank:ndcg"},
Xy,
num_boost_round=10,
)
cpu_auc = float(booster.eval(Xy).split(":")[1])
booster.set_param({"device": "cuda:0"})
assert (
json.loads(booster.save_config())["learner"]["generic_param"]["device"]
== "cuda:0"
)
gpu_auc = float(booster.eval(Xy).split(":")[1])
assert (
json.loads(booster.save_config())["learner"]["generic_param"]["device"]
== "cuda:0"
)
np.testing.assert_allclose(cpu_auc, gpu_auc)
def test_pr_auc_binary(self):
self.cpu_test.run_pr_auc_binary("gpu_hist")
def test_pr_auc_multi(self):
self.cpu_test.run_pr_auc_multi("gpu_hist")
def test_pr_auc_ltr(self):
self.cpu_test.run_pr_auc_ltr("gpu_hist")
def test_precision_score(self):
check_precision_score("gpu_hist")
@pytest.mark.skipif(**tm.no_sklearn())
def test_quantile_error(self) -> None:
check_quantile_error("gpu_hist")
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...transform_encoder import TransformerTorchEncoder
from ..integration.test_integration import filter_none
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert (
ex.pretrained_model_name_or_path
== 'sentence-transformers/distilbert-base-nli-stsb-mean-tokens'
)
def test_compute_tokens():
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(['hello this is a test', 'and another test'])
assert tokens['input_ids'].shape == (2, 7)
assert tokens['attention_mask'].shape == (2, 7)
@pytest.mark.parametrize('hidden_seqlen', [4, 8])
def test_compute_embeddings(hidden_seqlen):
embedding_size = 10
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(['hello world'])
hidden_states = tuple(
torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7)
)
embeddings = enc._compute_embedding(
hidden_states=hidden_states, input_tokens=tokens
)
assert embeddings.shape == (1, embedding_size)
def test_encoding_cpu():
enc = TransformerTorchEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = TransformerTorchEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences['A'] = 'Hello, my name is Michael.'
sentences['B'] = 'Today we are going to Disney World.'
sentences['C'] = 'There are animals on the road'
sentences['D'] = 'A dog is running down the road'
encoder = TransformerTorchEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('C', 'D')
assert small_distance < dist('C', 'B')
assert small_distance < dist('C', 'A')
assert small_distance < dist('B', 'A')
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = filter_none(
DocumentArray(res).traverse_flat([path]).get_attributes('embedding')
)
for emb in embeddings:
if emb is None:
return False
assert len(embeddings) == count
return validate
encoder = TransformerTorchEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {'traversal_paths': [traversal_path]})
validate_traversal(docs_per_path)(docs)
def test_multiple_traversal_paths():
sentences = list()
sentences.append('Hello, my name is Michael.')
sentences.append('Today we are going to Disney World.')
sentences.append('There are animals on the road')
sentences.append('A dog is running down the road')
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = TransformerTorchEncoder(default_traversal_paths=['r', 'c', 'cc'])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (768,)
assert doc.chunks[0].embedding.shape == (768,)
assert doc.chunks[0].chunks[0].embedding.shape == (768,)
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...transform_encoder import TransformerTorchEncoder
from ..integration.test_integration import filter_none
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert (
ex.pretrained_model_name_or_path
== 'sentence-transformers/distilbert-base-nli-stsb-mean-tokens'
)
def test_compute_tokens():
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(['hello this is a test', 'and another test'])
assert tokens['input_ids'].shape == (2, 7)
assert tokens['attention_mask'].shape == (2, 7)
@pytest.mark.parametrize('hidden_seqlen', [4, 8])
def test_compute_embeddings(hidden_seqlen):
embedding_size = 10
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(['hello world'])
hidden_states = tuple(
torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7)
)
embeddings = enc._compute_embedding(
hidden_states=hidden_states, input_tokens=tokens
)
assert embeddings.shape == (1, embedding_size)
def test_encoding_cpu():
enc = TransformerTorchEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='GPU is needed for this test')
def test_encoding_gpu():
enc = TransformerTorchEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences['A'] = 'Hello, my name is Michael.'
sentences['B'] = 'Today we are going to Disney World.'
sentences['C'] = 'There are animals on the road'
sentences['D'] = 'A dog is running down the road'
encoder = TransformerTorchEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('C', 'D')
assert small_distance < dist('C', 'B')
assert small_distance < dist('C', 'A')
assert small_distance < dist('B', 'A')
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = filter_none(
DocumentArray(res).traverse_flat([path]).get_attributes('embedding')
)
for emb in embeddings:
if emb is None:
return False
assert len(embeddings) == count
return validate
encoder = TransformerTorchEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {'traversal_paths': [traversal_path]})
validate_traversal(docs_per_path)(docs)
def test_multiple_traversal_paths():
sentences = list()
sentences.append('Hello, my name is Michael.')
sentences.append('Today we are going to Disney World.')
sentences.append('There are animals on the road')
sentences.append('A dog is running down the road')
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = TransformerTorchEncoder(default_traversal_paths=['r', 'c', 'cc'])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (768,)
assert doc.chunks[0].embedding.shape == (768,)
assert doc.chunks[0].chunks[0].embedding.shape == (768,)
|
from langchain_core.prompts.prompt import PromptTemplate
API_URL_PROMPT_TEMPLATE = """You are given the below API Documentation:
{api_docs}
Using this documentation, generate the full API url to call for answering the user question.
You should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.
Question:{question}
API url:""" # noqa: E501
API_URL_PROMPT = PromptTemplate(
input_variables=[
"api_docs",
"question",
],
template=API_URL_PROMPT_TEMPLATE,
)
API_RESPONSE_PROMPT_TEMPLATE = (
API_URL_PROMPT_TEMPLATE
+ """ {api_url}
Here is the response from the API:
{api_response}
Summarize this response to answer the original question.
Summary:"""
)
API_RESPONSE_PROMPT = PromptTemplate(
input_variables=["api_docs", "question", "api_url", "api_response"],
template=API_RESPONSE_PROMPT_TEMPLATE,
)
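# --- Illustrative usage sketch, not part of the original module. ---
# Shows how the templates above are filled in; the API documentation,
# question, URL and response below are placeholders, not real data.
if __name__ == "__main__":
    print(
        API_RESPONSE_PROMPT.format(
            api_docs="GET /weather?city=<name> returns the current weather.",
            question="What is the weather in Berlin?",
            api_url="https://example.com/weather?city=Berlin",
            api_response='{"city": "Berlin", "temp_c": 21}',
        )
    )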
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
API_URL_PROMPT_TEMPLATE = """You are given the below API Documentation:
{api_docs}
Using this documentation, generate the full API url to call for answering the user question.
You should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.
Question:{question}
API url:"""
API_URL_PROMPT = PromptTemplate(
input_variables=[
"api_docs",
"question",
],
template=API_URL_PROMPT_TEMPLATE,
)
API_RESPONSE_PROMPT_TEMPLATE = (
API_URL_PROMPT_TEMPLATE
+ """ {api_url}
Here is the response from the API:
{api_response}
Summarize this response to answer the original question.
Summary:"""
)
API_RESPONSE_PROMPT = PromptTemplate(
input_variables=["api_docs", "question", "api_url", "api_response"],
template=API_RESPONSE_PROMPT_TEMPLATE,
)
|
import itertools
from typing import (
TYPE_CHECKING,
Union,
Sequence,
overload,
Any,
List,
)
import numpy as np
from docarray import Document
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import (
DocumentArrayIndexType,
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArrayMultipleAttributeType,
DocumentArraySingleAttributeType,
)
from docarray import DocumentArray
class GetItemMixin:
"""Provide helper functions to enable advance indexing in `__getitem__`"""
@overload
def __getitem__(self, index: 'DocumentArraySingletonIndexType') -> 'Document':
...
@overload
def __getitem__(self, index: 'DocumentArrayMultipleIndexType') -> 'DocumentArray':
...
@overload
def __getitem__(self, index: 'DocumentArraySingleAttributeType') -> List[Any]:
...
@overload
def __getitem__(
self, index: 'DocumentArrayMultipleAttributeType'
) -> List[List[Any]]:
...
def __getitem__(
self, index: 'DocumentArrayIndexType'
) -> Union['Document', 'DocumentArray']:
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
return self._get_doc_by_offset(int(index))
elif isinstance(index, str):
if index.startswith('@'):
return self.traverse_flat(index[1:])
else:
return self._get_doc_by_id(index)
elif isinstance(index, slice):
from docarray import DocumentArray
return DocumentArray(self._get_docs_by_slice(index))
elif index is Ellipsis:
return self.flatten()
elif isinstance(index, Sequence):
from docarray import DocumentArray
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
return DocumentArray([self[index[0]], self[index[1]]])
else:
_docs = self[index[0]]
if not _docs:
return []
if isinstance(_docs, Document):
return getattr(_docs, index[1])
return _docs._get_attributes(index[1])
elif isinstance(index[0], (slice, Sequence)) or index[0] is Ellipsis:
_docs = self[index[0]]
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
return _docs._get_attributes(*_attrs)
elif isinstance(index[0], bool):
return DocumentArray(itertools.compress(self, index))
elif isinstance(index[0], int):
return DocumentArray(self._get_docs_by_offsets(index))
elif isinstance(index[0], str):
return DocumentArray(self._get_docs_by_ids(index))
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
return self[index.tolist()]
else:
raise IndexError(
                    f'When using np.ndarray as index, its `ndim` must be 1; received ndim={index.ndim}'
)
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
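# --- Illustrative usage sketch, not part of the original mixin. ---
# A few of the index forms handled by __getitem__ above, shown on a small
# DocumentArray; the ids and texts are placeholders.
if __name__ == '__main__':
    from docarray import Document, DocumentArray

    da = DocumentArray([Document(id=str(i), text=f'doc {i}') for i in range(5)])
    print(da[0].text)          # integer offset -> single Document
    print(da['3'].text)        # string id -> single Document
    print(len(da[1:4]))        # slice -> DocumentArray
    print(da[:, 'text'])       # (slice, attribute) -> list of attribute values
    print(len(da[[0, 2, 4]]))  # sequence of offsets -> DocumentArray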
|
import itertools
from typing import (
TYPE_CHECKING,
Union,
Sequence,
overload,
Any,
List,
)
import numpy as np
from docarray import Document
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import (
DocumentArrayIndexType,
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArrayMultipleAttributeType,
DocumentArraySingleAttributeType,
)
from docarray import DocumentArray
class GetItemMixin:
"""Provide helper functions to enable advance indexing in `__getitem__`"""
@overload
def __getitem__(self, index: 'DocumentArraySingletonIndexType') -> 'Document':
...
@overload
def __getitem__(self, index: 'DocumentArrayMultipleIndexType') -> 'DocumentArray':
...
@overload
def __getitem__(self, index: 'DocumentArraySingleAttributeType') -> List[Any]:
...
@overload
def __getitem__(
self, index: 'DocumentArrayMultipleAttributeType'
) -> List[List[Any]]:
...
def __getitem__(
self, index: 'DocumentArrayIndexType'
) -> Union['Document', 'DocumentArray']:
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
return self._get_doc_by_offset(int(index))
elif isinstance(index, str):
if index.startswith('@'):
return self.traverse_flat(index[1:])
else:
return self._get_doc_by_id(index)
elif isinstance(index, slice):
from docarray import DocumentArray
return DocumentArray(self._get_docs_by_slice(index))
elif index is Ellipsis:
return self.flatten()
elif isinstance(index, Sequence):
from docarray import DocumentArray
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
return DocumentArray([self[index[0]], self[index[1]]])
else:
_docs = self[index[0]]
if not _docs:
return []
if isinstance(_docs, Document):
return getattr(_docs, index[1])
return _docs._get_attributes(index[1])
elif isinstance(index[0], (slice, Sequence)):
_docs = self[index[0]]
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
return _docs._get_attributes(*_attrs)
elif isinstance(index[0], bool):
return DocumentArray(itertools.compress(self, index))
elif isinstance(index[0], int):
return DocumentArray(self._get_docs_by_offsets(index))
elif isinstance(index[0], str):
return DocumentArray(self._get_docs_by_ids(index))
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
return self[index.tolist()]
else:
raise IndexError(
                    f'When using np.ndarray as index, its `ndim` must be 1; received ndim={index.ndim}'
)
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
|
# coding=utf-8
# Copyright 2025 Cohere team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AyaVision model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class AyaVisionConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of an [`AyaVisionForConditionalGeneration`]. It is used to instantiate an
AyaVision model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of AyaVision.
e.g. [CohereForAI/aya-vision-8b](https://huggingface.co/CohereForAI/aya-vision-8b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to select the vision feature.
downsample_factor (`int`, *optional*, defaults to 2):
The downsample factor to apply to the vision features.
adapter_layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon value used for layer normalization in the adapter.
image_token_index (`int`, *optional*, defaults to 255036):
The image token index to encode the image prompt.
"""
model_type = "aya_vision"
attribute_map = {
"image_token_id": "image_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
vision_feature_select_strategy="full",
vision_feature_layer=-1,
downsample_factor=2,
adapter_layer_norm_eps=1e-6,
image_token_index=255036,
**kwargs,
):
self.image_token_index = image_token_index
self.downsample_factor = downsample_factor
self.adapter_layer_norm_eps = adapter_layer_norm_eps
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
if isinstance(vision_config, dict):
vision_config["model_type"] = (
vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
)
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip_vision_model"](
hidden_size=1152,
intermediate_size=4304,
patch_size=14,
image_size=384,
num_hidden_layers=26,
num_attention_heads=14,
vision_use_head=False,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["cohere2"]()
self.text_config = text_config
super().__init__(**kwargs)
__all__ = ["AyaVisionConfig"]
|
# coding=utf-8
# Copyright 2025 Cohere team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AyaVision model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class AyaVisionConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of an [`AyaVisionForConditionalGeneration`]. It is used to instantiate an
AyaVision model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of AyaVision.
e.g. [CohereForAI/aya-vision-8b](https://huggingface.co/CohereForAI/aya-vision-8b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to select the vision feature.
downsample_factor (`int`, *optional*, defaults to 2):
The downsample factor to apply to the vision features.
adapter_layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon value used for layer normalization in the adapter.
image_token_index (`int`, *optional*, defaults to 255036):
The image token index to encode the image prompt.
"""
model_type = "aya_vision"
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
vision_feature_select_strategy="full",
vision_feature_layer=-1,
downsample_factor=2,
adapter_layer_norm_eps=1e-6,
image_token_index=255036,
**kwargs,
):
self.image_token_index = image_token_index
self.downsample_factor = downsample_factor
self.adapter_layer_norm_eps = adapter_layer_norm_eps
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
if isinstance(vision_config, dict):
vision_config["model_type"] = (
vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
)
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip_vision_model"](
hidden_size=1152,
intermediate_size=4304,
patch_size=14,
image_size=384,
num_hidden_layers=26,
num_attention_heads=14,
vision_use_head=False,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["cohere2"]()
self.text_config = text_config
super().__init__(**kwargs)
__all__ = ["AyaVisionConfig"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmengine.model import ModuleList
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsampling it.
Args:
num_convs (int): Number of conv layers in the head. Defaults to 0.
num_fcs (int): Number of fc layers in the head. Defaults to 2.
fc_out_channels (int): Number of output channels of fc layer.
Defaults to 1024.
        downsample_factor (int): The factor that the feature map is downsampled by.
Defaults to 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs: int = 0,
num_fcs: int = 2,
fc_out_channels: int = 1024,
downsample_factor: int = 2,
init_cfg: MultiConfig = dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg) -> None:
super().__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self) -> None:
"""Initialize weights."""
super(FCNMaskHead, self).init_weights()
def forward(self, x: Tensor) -> Tensor:
"""Forward features from the upstream network.
Args:
x (Tensor): Extract mask RoI features.
Returns:
Tensor: Predicted foreground masks.
"""
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_preds = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_preds
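# --- Illustrative usage sketch, not part of the original module. ---
# A hedged example of the downsampling behaviour described in the docstring,
# relying on FCNMaskHead's default in_channels (256) and roi_feat_size (14);
# the RoI batch size and class count below are arbitrary.
if __name__ == '__main__':
    import torch

    head = CoarseMaskHead(num_classes=80, downsample_factor=2)
    roi_feats = torch.randn(4, 256, 14, 14)  # (num_rois, C, roi_h, roi_w)
    preds = head(roi_feats)
    print(preds.shape)  # expected: (4, 80, 7, 7), i.e. 14 // downsample_factor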
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmengine.model import ModuleList
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsampling it.
Args:
num_convs (int): Number of conv layers in the head. Defaults to 0.
num_fcs (int): Number of fc layers in the head. Defaults to 2.
fc_out_channels (int): Number of output channels of fc layer.
Defaults to 1024.
        downsample_factor (int): The factor that the feature map is downsampled by.
Defaults to 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs: int = 0,
num_fcs: int = 2,
fc_out_channels: int = 1024,
downsample_factor: int = 2,
init_cfg: MultiConfig = dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg) -> None:
super().__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self) -> None:
"""Initialize weights."""
super(FCNMaskHead, self).init_weights()
def forward(self, x: Tensor) -> Tensor:
"""Forward features from the upstream network.
Args:
            x (Tensor): Extracted mask RoI features.
Returns:
Tensor: Predicted foreground masks.
"""
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
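# A minimal shape sketch of the coarse head above, using plain torch.nn rather than
# mmdet; roi_feat_size=14, 256 channels and 80 classes are assumed example values,
# not values taken from any config in this file.
def _coarse_head_shape_sketch():
    import torch
    from torch import nn
    roi_feat_size, downsample_factor = 14, 2
    channels, fc_out_channels, num_classes = 256, 1024, 80
    output_size = roi_feat_size // downsample_factor  # 7
    output_area = output_size * output_size  # 49
    downsample = nn.Conv2d(
        channels, channels, kernel_size=downsample_factor, stride=downsample_factor)
    fc1 = nn.Linear(channels * output_area, fc_out_channels)
    fc2 = nn.Linear(fc_out_channels, fc_out_channels)
    fc_logits = nn.Linear(fc_out_channels, num_classes * output_area)
    x = torch.randn(8, channels, roi_feat_size, roi_feat_size)  # 8 RoIs
    x = downsample(x).flatten(1)  # (8, 256 * 49)
    x = torch.relu(fc2(torch.relu(fc1(x))))
    mask_preds = fc_logits(x).view(x.size(0), num_classes, output_size, output_size)
    return mask_preds.shape  # torch.Size([8, 80, 7, 7])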
|
"""OpenAI Image Generation tool spec."""
import base64
import os
import time
from typing import Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
DEFAULT_CACHE_DIR = "../../../img_cache"
DEFAULT_SIZE = "1024x1024"
valid_sizes = {
"dall-e-2": ["256x256", "512x512", "1024x1024"],
"dall-e-3": ["1024x1024", "1792x1024", "1024x1792"],
}
def get_extension(content: str):
map = {
"/": "jpg",
"i": "png",
"R": "gif",
"U": "webp",
}
return map.get(content[0], "jpg")
class OpenAIImageGenerationToolSpec(BaseToolSpec):
"""OpenAI Image Generation tool spec."""
spec_functions = ["image_generation"]
def __init__(
self, api_key: Optional[str] = None, cache_dir: Optional[str] = None
) -> None:
try:
from openai import OpenAI
except ImportError:
raise ImportError(
"Please install openai with `pip install openai` to use this tool"
)
"""Initialize with parameters."""
self.client = OpenAI(api_key=api_key)
self.cache_dir = cache_dir or DEFAULT_CACHE_DIR
def get_cache_dir(self):
return self.cache_dir
def save_base64_image(self, base64_str, image_name):
try:
from io import BytesIO
from PIL import Image
except ImportError:
raise ImportError(
"Please install Pillow with `pip install Pillow` to use this tool"
)
cache_dir = self.cache_dir
# Create cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Decode the base64 string
image_data = base64.b64decode(base64_str)
# Create an image from the decoded bytes and save it
image_path = os.path.join(cache_dir, image_name)
with Image.open(BytesIO(image_data)) as img:
img.save(image_path)
return image_path
def image_generation(
self,
text: str,
model: Optional[str] = "dall-e-3",
quality: Optional[str] = "standard",
num_images: Optional[int] = 1,
size: Optional[str] = DEFAULT_SIZE,
style: Optional[str] = "vivid",
timeout: Optional[int] = None,
download: Optional[bool] = False,
) -> str:
"""
This tool accepts a natural language string and will use OpenAI's DALL-E model to generate an image.
Args:
text: The text to generate an image from.
model: The model to use for image generation. Defaults to `dall-e-3`.
Must be one of `dall-e-2` or `dall-e-3`.
num_images: The number of images to generate. Defaults to 1.
Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
quality: The quality of the image that will be generated. Defaults to `standard`.
Must be one of `standard` or `hd`. `hd` creates images with finer
details and greater consistency across the image. This param is only supported
for `dall-e-3`.
size: The size of the generated images. Defaults to `1024x1024`.
Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
style: The style of the generated images. Defaults to `vivid`.
Must be one of `vivid` or `natural`.
Vivid causes the model to lean towards generating hyper-real and dramatic images.
Natural causes the model to produce more natural, less hyper-real looking images.
This param is only supported for `dall-e-3`.
timeout: Override the client-level default timeout for this request, in seconds. Defaults to `None`.
            download: If `True`, the image will be downloaded to the cache directory. Defaults to `False`.
"""
if size not in valid_sizes[model]:
raise Exception(f"Invalid size for {model}: {size}")
response = self.client.images.generate(
prompt=text,
n=num_images,
model=model,
quality=quality,
size=size,
response_format="b64_json" if download else "url",
style=style,
timeout=timeout,
)
if download:
image_bytes = response.data[0].b64_json
ext = get_extension(image_bytes)
filename = f"{time.time()}.{ext}"
return (self.save_base64_image(image_bytes, filename),)
return response.data[0].url
|
"""OpenAI Image Generation tool spec."""
import base64
import os
import time
from typing import Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
DEFAULT_CACHE_DIR = "../../../img_cache"
DEFAULT_SIZE = "1024x1024"
valid_sizes = {
"dall-e-2": ["256x256", "512x512", "1024x1024"],
"dall-e-3": ["1024x1024", "1792x1024", "1024x1792"],
}
def get_extension(content: str):
map = {
"/": "jpg",
"i": "png",
"R": "gif",
"U": "webp",
}
return map.get(content[0], "jpg")
class OpenAIImageGenerationToolSpec(BaseToolSpec):
"""OpenAI Image Generation tool spec."""
spec_functions = ["image_generation"]
def __init__(
self, api_key: Optional[str] = None, cache_dir: Optional[str] = None
) -> None:
try:
from openai import OpenAI
except ImportError:
raise ImportError(
"Please install openai with `pip install openai` to use this tool"
)
"""Initialize with parameters."""
self.client = OpenAI(api_key=api_key)
self.cache_dir = cache_dir or DEFAULT_CACHE_DIR
def get_cache_dir(self):
return self.cache_dir
def save_base64_image(self, base64_str, image_name):
try:
from io import BytesIO
from PIL import Image
except ImportError:
raise ImportError(
"Please install Pillow with `pip install Pillow` to use this tool"
)
cache_dir = self.cache_dir
# Create cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Decode the base64 string
image_data = base64.b64decode(base64_str)
# Create an image from the decoded bytes and save it
image_path = os.path.join(cache_dir, image_name)
with Image.open(BytesIO(image_data)) as img:
img.save(image_path)
return image_path
def image_generation(
self,
text: str,
model: Optional[str] = "dall-e-3",
quality: Optional[str] = "standard",
num_images: Optional[int] = 1,
size: Optional[str] = DEFAULT_SIZE,
style: Optional[str] = "vivid",
timeout: Optional[int] = None,
download: Optional[bool] = False,
) -> str:
"""
This tool accepts a natural language string and will use OpenAI's DALL-E model to generate an image.
Args:
text: The text to generate an image from.
model: The model to use for image generation. Defaults to `dall-e-3`.
Must be one of `dall-e-2` or `dall-e-3`.
num_images: The number of images to generate. Defaults to 1.
Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
quality: The quality of the image that will be generated. Defaults to `standard`.
Must be one of `standard` or `hd`. `hd` creates images with finer
details and greater consistency across the image. This param is only supported
for `dall-e-3`.
size: The size of the generated images. Defaults to `1024x1024`.
Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
style: The style of the generated images. Defaults to `vivid`.
Must be one of `vivid` or `natural`.
Vivid causes the model to lean towards generating hyper-real and dramatic images.
Natural causes the model to produce more natural, less hyper-real looking images.
This param is only supported for `dall-e-3`.
timeout: Override the client-level default timeout for this request, in seconds. Defaults to `None`.
            download: If `True`, the image will be downloaded to the cache directory. Defaults to `False`.
"""
if size not in valid_sizes[model]:
raise Exception(f"Invalid size for {model}: {size}")
response = self.client.images.generate(
prompt=text,
n=num_images,
model=model,
quality=quality,
size=size,
response_format="b64_json" if download else "url",
style=style,
timeout=timeout,
)
if download:
image_bytes = response.data[0].b64_json
ext = get_extension(image_bytes)
filename = f"{time.time()}.{ext}"
return (self.save_base64_image(image_bytes, filename),)
return response.data[0].url
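def _image_generation_usage_sketch():
    # A hypothetical usage sketch (not from the original module): it assumes
    # OPENAI_API_KEY is set in the environment and that network access is available.
    tool_spec = OpenAIImageGenerationToolSpec(cache_dir="./img_cache")
    # URL mode (download=False, the default): returns the hosted image URL.
    url = tool_spec.image_generation(text="a watercolor lighthouse at dusk")
    # Download mode: saves the image under cache_dir; note that, as written above,
    # this branch returns a one-element tuple containing the local file path.
    (image_path,) = tool_spec.image_generation(
        text="a watercolor lighthouse at dusk", size="1024x1024", download=True
    )
    return url, image_path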
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.prompts.base import (
BasePromptTemplate,
aformat_document,
format_document,
)
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.dict import DictPromptTemplate
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
)
from langchain_core.prompts.few_shot_with_templates import (
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
check_valid_template,
get_template_variables,
jinja2_formatter,
validate_jinja2,
)
__all__ = (
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"DictPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"format_document",
"aformat_document",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
)
_dynamic_imports = {
"BasePromptTemplate": "base",
"format_document": "base",
"aformat_document": "base",
"AIMessagePromptTemplate": "chat",
"BaseChatPromptTemplate": "chat",
"ChatMessagePromptTemplate": "chat",
"ChatPromptTemplate": "chat",
"DictPromptTemplate": "dict",
"HumanMessagePromptTemplate": "chat",
"MessagesPlaceholder": "chat",
"SystemMessagePromptTemplate": "chat",
"FewShotChatMessagePromptTemplate": "few_shot",
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",
"get_template_variables": "string",
"jinja2_formatter": "string",
"validate_jinja2": "string",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.prompts.base import (
BasePromptTemplate,
aformat_document,
format_document,
)
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
)
from langchain_core.prompts.few_shot_with_templates import (
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
check_valid_template,
get_template_variables,
jinja2_formatter,
validate_jinja2,
)
__all__ = (
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"format_document",
"aformat_document",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
)
_dynamic_imports = {
"BasePromptTemplate": "base",
"format_document": "base",
"aformat_document": "base",
"AIMessagePromptTemplate": "chat",
"BaseChatPromptTemplate": "chat",
"ChatMessagePromptTemplate": "chat",
"ChatPromptTemplate": "chat",
"HumanMessagePromptTemplate": "chat",
"MessagesPlaceholder": "chat",
"SystemMessagePromptTemplate": "chat",
"FewShotChatMessagePromptTemplate": "few_shot",
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",
"get_template_variables": "string",
"jinja2_formatter": "string",
"validate_jinja2": "string",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
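def _lazy_import_sketch():
    # A small sketch of what the __getattr__ hook above provides: names listed in
    # _dynamic_imports are imported from their submodule only on first attribute
    # access (here resolved via module name "prompt") and then cached in globals().
    from langchain_core.prompts import PromptTemplate
    prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
    return prompt.format(topic="bears")  # "Tell me a joke about bears"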
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_boxes,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_boxes,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
__version__ = '0.30.0a3'
from docarray.array import DocumentArray, DocumentArrayStacked
from docarray.base_document.document import BaseDocument
__all__ = ['BaseDocument', 'DocumentArray', 'DocumentArrayStacked']
|
__version__ = '0.30.0a3'
from docarray.array.array.array import DocumentArray
from docarray.base_document.document import BaseDocument
__all__ = [
'BaseDocument',
'DocumentArray',
]
|
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instances
from sklearn.utils._testing import SkipTest
class AllowNanEstimators(Directive):
@staticmethod
def make_paragraph_for_estimator_type(estimator_type):
intro = nodes.list_item()
intro += nodes.strong(text="Estimators that allow NaN values for type ")
intro += nodes.literal(text=f"{estimator_type}")
intro += nodes.strong(text=":\n")
exists = False
lst = nodes.bullet_list()
for name, est_class in all_estimators(type_filter=estimator_type):
with suppress(SkipTest):
# Here we generate the text only for one instance. This directive
# should not be used for meta-estimators where tags depend on the
# sub-estimator.
est = next(_construct_instances(est_class))
if est.__sklearn_tags__().input_tags.allow_nan:
module_name = ".".join(est_class.__module__.split(".")[:2])
class_title = f"{est_class.__name__}"
class_url = f"./generated/{module_name}.{class_title}.html"
item = nodes.list_item()
para = nodes.paragraph()
para += nodes.reference(
class_title, text=class_title, internal=False, refuri=class_url
)
exists = True
item += para
lst += item
intro += lst
return [intro] if exists else None
def run(self):
lst = nodes.bullet_list()
for i in ["cluster", "regressor", "classifier", "transformer"]:
item = self.make_paragraph_for_estimator_type(i)
if item is not None:
lst += item
return [lst]
def setup(app):
app.add_directive("allow_nan_estimators", AllowNanEstimators)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instances
from sklearn.utils._testing import SkipTest
class AllowNanEstimators(Directive):
@staticmethod
def make_paragraph_for_estimator_type(estimator_type):
intro = nodes.list_item()
intro += nodes.strong(text="Estimators that allow NaN values for type ")
intro += nodes.literal(text=f"{estimator_type}")
intro += nodes.strong(text=":\n")
exists = False
lst = nodes.bullet_list()
for name, est_class in all_estimators(type_filter=estimator_type):
with suppress(SkipTest):
# Here we generate the text only for one instance. This directive
# should not be used for meta-estimators where tags depend on the
# sub-estimator.
est = next(_construct_instances(est_class))
if est.__sklearn_tags__().input_tags.allow_nan:
module_name = ".".join(est_class.__module__.split(".")[:2])
class_title = f"{est_class.__name__}"
class_url = f"./generated/{module_name}.{class_title}.html"
item = nodes.list_item()
para = nodes.paragraph()
para += nodes.reference(
class_title, text=class_title, internal=False, refuri=class_url
)
exists = True
item += para
lst += item
intro += lst
return [intro] if exists else None
def run(self):
lst = nodes.bullet_list()
for i in ["cluster", "regressor", "classifier", "transformer"]:
item = self.make_paragraph_for_estimator_type(i)
if item is not None:
lst += item
return [lst]
def setup(app):
app.add_directive("allow_nan_estimators", AllowNanEstimators)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
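def _allow_nan_tag_sketch():
    # A standalone sketch of the tag check used in make_paragraph_for_estimator_type
    # above; HistGradientBoostingClassifier is an assumed example that is expected to
    # report NaN support through its estimator tags.
    from sklearn.ensemble import HistGradientBoostingClassifier
    est = HistGradientBoostingClassifier()
    return est.__sklearn_tags__().input_tags.allow_nan  # expected: True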
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
from jina_commons import get_logger
from jinahub.indexers.searcher.NumpySearcher.numpy_searcher import NumpySearcher
from jinahub.indexers.storage.PostgreSQLStorage.postgres import PostgreSQLStorage
class NumpyPostgresSearcher(Executor):
"""A Compound Indexer made up of a NumpyIndexer (for vectors) and a Postgres Indexer"""
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
# when constructed from rolling update the dump_path is passed via a runtime_arg
dump_path = dump_path or kwargs.get('runtime_args').get('dump_path')
self.logger = get_logger(self)
self._kv_indexer = None
self._vec_indexer = None
if dump_path:
self._vec_indexer = NumpySearcher(dump_path=dump_path, **kwargs)
self._kv_indexer = PostgreSQLStorage(**kwargs)
else:
self.logger.warning(
f'No dump path provided for {self}. Use .rolling_update() to re-initialize...'
)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
if self._kv_indexer and self._vec_indexer:
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
kv_parameters['traversal_paths'] = [
path + 'm' for path in kv_parameters.get('traversal_paths', ['r'])
]
self._kv_indexer.search(docs, kv_parameters)
else:
return
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
from jina_commons import get_logger
from jinahub.indexers.searcher.NumpySearcher import NumpySearcher
from jinahub.indexers.storage.PostgreSQLStorage import PostgreSQLStorage
class NumpyPostgresSearcher(Executor):
"""A Compound Indexer made up of a NumpyIndexer (for vectors) and a Postgres Indexer"""
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
# when constructed from rolling update the dump_path is passed via a runtime_arg
dump_path = dump_path or kwargs.get('runtime_args').get('dump_path')
self.logger = get_logger(self)
self._kv_indexer = None
self._vec_indexer = None
if dump_path:
self._vec_indexer = NumpySearcher(dump_path=dump_path, **kwargs)
self._kv_indexer = PostgreSQLStorage(**kwargs)
else:
self.logger.warning(
f'No dump path provided for {self}. Use .rolling_update() to re-initialize...'
)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
if self._kv_indexer and self._vec_indexer:
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
kv_parameters['traversal_paths'] = [
path + 'm' for path in kv_parameters.get('traversal_paths', ['r'])
]
self._kv_indexer.search(docs, kv_parameters)
else:
return
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.data import InstanceData
from mmdet.core.bbox.assigners import AssignResult
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
            pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors, points, or bboxes predicted by the model,
shape(n, 4).
            gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
Returns:
:obj:`SamplingResult`: sampler results
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_bboxes,
assign_result=assign_result,
gt_flags=gt_flags,
avg_factor_with_neg=False)
return sampling_result
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.data import InstanceData
from mmdet.core.bbox.assigners import AssignResult
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
            pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors, points, or bboxes predicted by the model,
shape(n, 4).
            gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
Returns:
:obj:`SamplingResult`: sampler results
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_bboxes,
assign_result=assign_result,
gt_flags=gt_flags)
return sampling_result
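def _index_selection_sketch():
    # A minimal sketch of the index selection in PseudoSampler.sample above, using an
    # assumed toy assignment: gt_inds > 0 marks positives, gt_inds == 0 marks
    # negatives, and unique() de-duplicates while sorting the indices.
    gt_inds = torch.tensor([0, 2, 0, 1, 2])
    pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(-1).unique()
    neg_inds = torch.nonzero(gt_inds == 0, as_tuple=False).squeeze(-1).unique()
    return pos_inds.tolist(), neg_inds.tolist()  # ([1, 3, 4], [0, 2])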
|
"""Tool for the SearxNG search API."""
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.utilities.searx_search import SearxSearchWrapper
class SearxSearchQueryInput(BaseModel):
"""Input for the SearxSearch tool."""
query: str = Field(description="query to look up on searx")
class SearxSearchRun(BaseTool):
"""Tool that queries a Searx instance."""
name: str = "searx_search"
description: str = (
"A meta search engine."
"Useful for when you need to answer questions about current events."
"Input should be a search query."
)
wrapper: SearxSearchWrapper
kwargs: dict = Field(default_factory=dict)
args_schema: Type[BaseModel] = SearxSearchQueryInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.wrapper.run(query, **self.kwargs)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return await self.wrapper.arun(query, **self.kwargs)
class SearxSearchResults(BaseTool):
"""Tool that queries a Searx instance and gets back json."""
name: str = "searx_search_results"
description: str = (
"A meta search engine."
"Useful for when you need to answer questions about current events."
"Input should be a search query. Output is a JSON array of the query results"
)
wrapper: SearxSearchWrapper
num_results: int = 4
kwargs: dict = Field(default_factory=dict)
args_schema: Type[BaseModel] = SearxSearchQueryInput
model_config = ConfigDict(
extra="allow",
)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.wrapper.results(query, self.num_results, **self.kwargs))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (
await self.wrapper.aresults(query, self.num_results, **self.kwargs)
).__str__()
|
"""Tool for the SearxNG search API."""
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.utilities.searx_search import SearxSearchWrapper
class SearxSearchQueryInput(BaseModel):
"""Input for the SearxSearch tool."""
query: str = Field(description="query to look up on searx")
class SearxSearchRun(BaseTool): # type: ignore[override, override]
"""Tool that queries a Searx instance."""
name: str = "searx_search"
description: str = (
"A meta search engine."
"Useful for when you need to answer questions about current events."
"Input should be a search query."
)
wrapper: SearxSearchWrapper
kwargs: dict = Field(default_factory=dict)
args_schema: Type[BaseModel] = SearxSearchQueryInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.wrapper.run(query, **self.kwargs)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return await self.wrapper.arun(query, **self.kwargs)
class SearxSearchResults(BaseTool): # type: ignore[override, override]
"""Tool that queries a Searx instance and gets back json."""
name: str = "searx_search_results"
description: str = (
"A meta search engine."
"Useful for when you need to answer questions about current events."
"Input should be a search query. Output is a JSON array of the query results"
)
wrapper: SearxSearchWrapper
num_results: int = 4
kwargs: dict = Field(default_factory=dict)
args_schema: Type[BaseModel] = SearxSearchQueryInput
model_config = ConfigDict(
extra="allow",
)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.wrapper.results(query, self.num_results, **self.kwargs))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (
await self.wrapper.aresults(query, self.num_results, **self.kwargs)
).__str__()
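def _searx_usage_sketch():
    # A hypothetical usage sketch for the two tools above; the searx_host URL is an
    # assumption and must point at a reachable SearxNG instance for the calls to work.
    wrapper = SearxSearchWrapper(searx_host="http://localhost:8888")
    search_tool = SearxSearchRun(wrapper=wrapper)
    answer = search_tool.run("latest python release")
    results_tool = SearxSearchResults(wrapper=wrapper, num_results=3)
    results = results_tool.run("latest python release")  # stringified list of results
    return answer, results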
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.5.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: Version information containing the major, minor and micro versions.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.4.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: Version information containing the major, minor and micro versions.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
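def _parse_version_examples():
    # Worked examples for parse_version_info above: purely numeric components become
    # ints, while an 'rc' suffix is split into a trailing string element.
    assert parse_version_info('0.4.0') == (0, 4, 0)
    assert parse_version_info('2.1.0rc1') == (2, 1, 0, 'rc1')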
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 100k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(100_000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CrossEncoderClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])),
labels=eval_dataset["label"],
name="AllNLI-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-nli"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CrossEncoderClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])),
test_dataset["label"],
name="AllNLI-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 100k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(100_000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. Before and during training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])),
labels=eval_dataset["label"],
name="AllNLI-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-nli"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])),
test_dataset["label"],
name="AllNLI-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
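# A short inference sketch with the trained model (assuming the steps above completed);
# label ids follow the mapping in the module docstring: 0 contradiction, 1 entailment,
# 2 neutral.
scores = model.predict(
    [
        ("A man is eating food.", "A man is eating a meal."),
        ("A man is eating food.", "The man is sleeping."),
    ]
)
logging.info(scores.argmax(axis=1))  # per-pair predicted label ids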
|
import os
from pathlib import Path
import cv2
import pytest
from jina import Document, DocumentArray, Executor
from ...yolov5_segmenter import YoloV5Segmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert segmenter.model_name_or_path == 'yolov5s'
@pytest.mark.parametrize(
'model_path',
[
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
'yolov5s',
'yolov5m',
],
)
def test_model_name_or_path(build_da, model_path):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) > 0
for chunk in doc.chunks:
assert chunk.blob.ndim == 3
assert chunk.tags.get('label')
assert chunk.tags.get('conf')
@pytest.mark.parametrize(
'model_path, expected_detections',
[
(
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
{'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3},
),
(
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
{'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3},
),
],
)
def test_n_detections(build_da, model_path, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
@pytest.mark.parametrize(
'confidence_threshold, expected_detections',
[
(0.3, {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.5, {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.8, {'bus.jpg': 3, 'zidane.jpg': 2, 'man.jpg': 0}),
],
)
def test_confidence_threshold(build_da, confidence_threshold, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(
model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'),
default_confidence_threshold=confidence_threshold,
)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
assert all(chunk.tags['conf'] >= confidence_threshold for chunk in doc.chunks)
def test_traversal_paths():
da = DocumentArray(
[
Document(
id='root',
blob=cv2.imread(os.path.join(cur_dir, '../data/img/man.jpg')),
),
]
)
segmenter = YoloV5Segmenter(
model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt')
)
segmenter.segment(da, parameters={})
# detects 2 persons and 1 cell phone
assert len(da[0].chunks) == 3
assert da[0].chunks[0].tags['label'] == 'person'
assert da[0].chunks[1].tags['label'] == 'person'
assert da[0].chunks[2].tags['label'] == 'cell phone'
segmenter.segment(da, parameters={'traversal_paths': ['c']})
# the first detected person spans the whole image, so segmenting the chunk produces 3 detections
person_chunk = da[0].chunks[0]
assert len(person_chunk.chunks) == 3
assert person_chunk.chunks[0].tags['label'] == 'person'
assert person_chunk.chunks[1].tags['label'] == 'person'
assert person_chunk.chunks[2].tags['label'] == 'cell phone'
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import cv2
import pytest
from jina import Executor, Document, DocumentArray
from ...yolov5_segmenter import YoloV5Segmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert segmenter.model_name_or_path == 'yolov5s'
@pytest.mark.parametrize(
'model_path', [
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
'yolov5s',
'yolov5m'
]
)
def test_model_name_or_path(build_da, model_path):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) > 0
for chunk in doc.chunks:
assert chunk.blob.ndim == 3
assert chunk.tags.get('label')
assert chunk.tags.get('conf')
@pytest.mark.parametrize(
'model_path, expected_detections', [
(os.path.join(cur_dir, '../data/models/yolov5s.pt'), {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(os.path.join(cur_dir, '../data/models/yolov5m.pt'), {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
]
)
def test_n_detections(build_da, model_path, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
@pytest.mark.parametrize(
'confidence_threshold, expected_detections', [
(0.3, {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.5, {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.8, {'bus.jpg': 3, 'zidane.jpg': 2, 'man.jpg': 0}),
]
)
def test_confidence_threshold(build_da, confidence_threshold, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'),
default_confidence_threshold=confidence_threshold)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
assert all(chunk.tags['conf'] >= confidence_threshold for chunk in doc.chunks)
def test_traversal_paths():
da = DocumentArray([
Document(
id='root',
blob=cv2.imread(os.path.join(cur_dir, '../data/img/man.jpg')),
),
])
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'))
segmenter.segment(da, parameters={})
# detects 2 persons and 1 cell phone
assert len(da[0].chunks) == 3
assert da[0].chunks[0].tags['label'] == 'person'
assert da[0].chunks[1].tags['label'] == 'person'
assert da[0].chunks[2].tags['label'] == 'cell phone'
segmenter.segment(da, parameters={'traversal_paths': ['c']})
# the first detected person spans the whole image, so segmenting the chunk produces 3 detections
person_chunk = da[0].chunks[0]
assert len(person_chunk.chunks) == 3
assert person_chunk.chunks[0].tags['label'] == 'person'
assert person_chunk.chunks[1].tags['label'] == 'person'
assert person_chunk.chunks[2].tags['label'] == 'cell phone'
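# The tests above rely on a `build_da` fixture that lives in a conftest.py not shown
# here; a hypothetical minimal version, consistent with how the tests read
# doc.tags['filename'] and doc.blob, could look like the sketch below (the image
# directory is an assumption).
@pytest.fixture
def _build_da_sketch():
    def _build():
        img_dir = os.path.join(cur_dir, '../data/img')
        return DocumentArray([
            Document(
                blob=cv2.imread(os.path.join(img_dir, fname)),
                tags={'filename': fname},
            )
            for fname in ['bus.jpg', 'zidane.jpg', 'man.jpg']
        ])
    return _build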
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect here, so the variable must be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.29.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
    with parallel plot generators against the Ubuntu default of ulimit -n 1024 or the OS X
    El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
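# A small sketch of inspecting the file-descriptor limits that _set_nofile above
# adjusts; it uses only the standard-library resource module and is POSIX-only.
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
print(f'nofile limits -> soft: {soft}, hard: {hard}')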
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable must be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for Python 3.8 on macOS, where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.28.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.benchmark, pytest.mark.slow]
class MyMatrix(BaseDoc):
matrix: NdArray
def cpu_intensive(doc: MyMatrix) -> MyMatrix:
# some cpu intensive function
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return doc
def test_map_docs_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 5
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs(
da=da, func=cpu_intensive, backend='process', num_worker=num_workers
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def cpu_intensive_batch(da: DocArray[MyMatrix]) -> DocArray[MyMatrix]:
# some cpu intensive function
for doc in da:
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return da
def test_map_docs_batch_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 16
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs_batch(
da=da,
func=cpu_intensive_batch,
batch_size=8,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def io_intensive(img: ImageDoc) -> ImageDoc:
# some io intensive function: load and set image url
img.tensor = img.url.load()
return img
def test_map_docs_multithreading():
def time_multithreading(num_workers: int) -> float:
n_docs = 100
da = DocArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs(da=da, func=io_intensive, backend='thread', num_worker=num_workers)
)
return time() - start_time
time_1_thread = time_multithreading(num_workers=1)
time_2_thread = time_multithreading(num_workers=2)
assert time_2_thread < time_1_thread
def io_intensive_batch(da: DocArray[ImageDoc]) -> DocArray[ImageDoc]:
# some io intensive function: load and set image url
for doc in da:
doc.tensor = doc.url.load()
return da
def test_map_docs_batch_multithreading():
def time_multithreading_batch(num_workers: int) -> float:
n_docs = 100
da = DocArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs_batch(
da=da,
func=io_intensive_batch,
backend='thread',
num_worker=num_workers,
batch_size=10,
)
)
return time() - start_time
time_1_thread = time_multithreading_batch(num_workers=1)
time_2_thread = time_multithreading_batch(num_workers=2)
assert time_2_thread < time_1_thread
|
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.benchmark, pytest.mark.slow]
class MyMatrix(BaseDocument):
matrix: NdArray
def cpu_intensive(doc: MyMatrix) -> MyMatrix:
# some cpu intensive function
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return doc
def test_map_docs_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 5
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocumentArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs(
da=da, func=cpu_intensive, backend='process', num_worker=num_workers
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def cpu_intensive_batch(da: DocumentArray[MyMatrix]) -> DocumentArray[MyMatrix]:
# some cpu intensive function
for doc in da:
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return da
def test_map_docs_batch_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 16
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocumentArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs_batch(
da=da,
func=cpu_intensive_batch,
batch_size=8,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def io_intensive(img: ImageDoc) -> ImageDoc:
# some io intensive function: load and set image url
img.tensor = img.url.load()
return img
def test_map_docs_multithreading():
def time_multithreading(num_workers: int) -> float:
n_docs = 100
da = DocumentArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs(da=da, func=io_intensive, backend='thread', num_worker=num_workers)
)
return time() - start_time
time_1_thread = time_multithreading(num_workers=1)
time_2_thread = time_multithreading(num_workers=2)
assert time_2_thread < time_1_thread
def io_intensive_batch(da: DocumentArray[ImageDoc]) -> DocumentArray[ImageDoc]:
# some io intensive function: load and set image url
for doc in da:
doc.tensor = doc.url.load()
return da
def test_map_docs_batch_multithreading():
def time_multithreading_batch(num_workers: int) -> float:
n_docs = 100
da = DocumentArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs_batch(
da=da,
func=io_intensive_batch,
backend='thread',
num_worker=num_workers,
batch_size=10,
)
)
return time() - start_time
time_1_thread = time_multithreading_batch(num_workers=1)
time_2_thread = time_multithreading_batch(num_workers=2)
assert time_2_thread < time_1_thread
|
"""Utilities for environment variables."""
from __future__ import annotations
import os
from typing import Any, Optional, Union
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in {
"",
"0",
"false",
"False",
}
def get_from_dict_or_env(
data: dict[str, Any],
key: Union[str, list[str]],
env_key: str,
default: Optional[str] = None,
) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
data: The dictionary to look up the key in.
key: The key to look up in the dictionary. This can be a list of keys to try
in order.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
"""
if isinstance(key, (list, tuple)):
for k in key:
if value := data.get(k):
return value
if isinstance(key, str) and key in data and data[key]:
return data[key]
key_for_err = key[0] if isinstance(key, (list, tuple)) else key
return get_from_env(key_for_err, env_key, default=default)
def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
key: The key to look up in the dictionary.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
Returns:
str: The value of the key.
Raises:
ValueError: If the key is not in the dictionary and no default value is
provided or if the environment variable is not set.
"""
if env_value := os.getenv(env_key):
return env_value
if default is not None:
return default
msg = (
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)
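# A usage sketch added for illustration (not part of the original module), showing the lookup
# order of `get_from_dict_or_env`: an explicit dict value wins, then the environment variable,
# then the default. The key and variable names below are hypothetical.
if __name__ == "__main__":
    os.environ["MY_SERVICE_API_KEY"] = "from-env"
    # the value found in the dict takes precedence over the environment
    print(get_from_dict_or_env({"api_key": "from-dict"}, "api_key", "MY_SERVICE_API_KEY"))
    # falls back to the environment variable when the dict has no value
    print(get_from_dict_or_env({}, "api_key", "MY_SERVICE_API_KEY"))
    # falls back to the default when neither the dict nor the environment provides one
    print(get_from_dict_or_env({}, "token", "MY_SERVICE_TOKEN", default="anonymous"))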
|
"""Utilities for environment variables."""
from __future__ import annotations
import os
from typing import Any, Optional, Union
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
def get_from_dict_or_env(
data: dict[str, Any],
key: Union[str, list[str]],
env_key: str,
default: Optional[str] = None,
) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
data: The dictionary to look up the key in.
key: The key to look up in the dictionary. This can be a list of keys to try
in order.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
"""
if isinstance(key, (list, tuple)):
for k in key:
if value := data.get(k):
return value
if isinstance(key, str) and key in data and data[key]:
return data[key]
key_for_err = key[0] if isinstance(key, (list, tuple)) else key
return get_from_env(key_for_err, env_key, default=default)
def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
key: The key to look up in the dictionary.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
Returns:
str: The value of the key.
Raises:
ValueError: If the key is not in the dictionary and no default value is
provided or if the environment variable is not set.
"""
if env_value := os.getenv(env_key):
return env_value
if default is not None:
return default
msg = (
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)
|
import pytest
from llama_index.llms.nvidia import NVIDIA as Interface
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock, base_url: str) -> None:
mock_response = {
"data": [
{
"id": "dummy",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
base_url = base_url.rstrip("/")
httpx_mock.add_response(
url=f"{base_url}/models",
method="GET",
json=mock_response,
status_code=200,
)
# test case for invalid base_url
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/chat/completion",
"https://0.0.0.0:8888/ranking",
],
)
def test_base_url_invalid_not_hosted(base_url: str, mock_local_models: None) -> None:
Interface(base_url=base_url)
@pytest.mark.parametrize("base_url", ["http://localhost:8080/v1"])
def test_base_url_valid_not_hosted(base_url: str, mock_local_models: None) -> None:
with pytest.warns(UserWarning):
Interface(base_url=base_url)
@pytest.mark.parametrize("base_url", ["https://integrate.api.nvidia.com/v1/"])
def test_base_url_valid_hosted_without_api_key(base_url: str) -> None:
Interface(base_url=base_url, api_key="BOGUS")
@pytest.mark.integration
@pytest.mark.parametrize("base_url", ["https://integrate.api.nvidia.com/v1/"])
def test_base_url_valid_hosted_with_api_key(base_url: str) -> None:
llm = Interface()
assert llm.base_url == base_url
llm = Interface(base_url=base_url)
assert llm.base_url == base_url
|
import pytest
from llama_index.llms.nvidia import NVIDIA as Interface
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock, base_url: str) -> None:
mock_response = {
"data": [
{
"id": "dummy",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
base_url = base_url.rstrip("/")
httpx_mock.add_response(
url=f"{base_url}/models",
method="GET",
json=mock_response,
status_code=200,
)
# test case for invalid base_url
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/chat/completion",
"https://0.0.0.0:8888/ranking",
],
)
def test_base_url_invalid_not_hosted(base_url: str, mock_local_models: None) -> None:
Interface(base_url=base_url)
@pytest.mark.parametrize("base_url", ["http://localhost:8080/v1"])
def test_base_url_valid_not_hosted(base_url: str, mock_local_models: None) -> None:
with pytest.warns(UserWarning):
Interface(base_url=base_url)
@pytest.mark.parametrize("base_url", ["https://integrate.api.nvidia.com/v1/"])
def test_base_url_valid_hosted_without_api_key(base_url: str) -> None:
Interface(base_url=base_url, api_key="BOGUS")
@pytest.mark.integration()
@pytest.mark.parametrize("base_url", ["https://integrate.api.nvidia.com/v1/"])
def test_base_url_valid_hosted_with_api_key(base_url: str) -> None:
llm = Interface()
assert llm.base_url == base_url
llm = Interface(base_url=base_url)
assert llm.base_url == base_url
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.prototype.transforms import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
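# A usage sketch added for illustration (not part of the original module), assuming the prototype
# functional ops are reachable through `Datapoint._F`: wrap a raw segmentation tensor in a `Mask`
# and apply a couple of the methods defined above.
if __name__ == "__main__":
    raw = torch.zeros(1, 16, 16, dtype=torch.uint8)
    raw[..., :8] = 1  # mark the left half of the mask as foreground
    mask = Mask(raw)
    print(mask.spatial_size)  # (16, 16)
    flipped = mask.horizontal_flip()  # foreground moves to the right half
    cropped = mask.crop(top=0, left=0, height=8, width=8)
    print(type(flipped), cropped.spatial_size)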
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.prototype.transforms import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.model_card import generate_model_card
from sentence_transformers.util import is_datasets_available, is_training_available
if is_datasets_available():
from datasets import Dataset, DatasetDict
if not is_training_available():
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture(scope="session")
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor 1", "anchor 2", ..., "anchor 10"],
"positive": ["positive 1", "positive 2", ..., "positive 10"],
"negative": ["negative 1", "negative 2", ..., "negative 10"],
}
"""
return Dataset.from_dict(
{
"anchor": [f"anchor {i}" for i in range(1, 11)],
"positive": [f"positive {i}" for i in range(1, 11)],
"negative": [f"negative {i}" for i in range(1, 11)],
}
)
@pytest.mark.parametrize(
("num_datasets", "expected_substrings"),
[
# 0 actually refers to just a single dataset
(
0,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors).",
"**Maximum Sequence Length:** 512 tokens",
"**Output Dimensionality:** 128 dimensions",
"**Similarity Function:** Cosine Similarity",
"#### Unnamed Dataset",
" | <code>anchor 1</code> | <code>positive 1</code> | <code>negative 1</code> |",
"* Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:",
],
),
(
1,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0 dataset.",
"#### train_0",
],
),
(
2,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0 and train_1 datasets.",
"#### train_0",
"#### train_1",
],
),
(
10,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0, train_1, train_2, train_3, train_4, train_5, train_6, train_7, train_8 and train_9 datasets.",
"<details><summary>train_0</summary>", # We start using <details><summary> if we have more than 3 datasets
"#### train_0",
"</details>\n<details><summary>train_9</summary>",
"#### train_9",
],
),
# We start using "50 datasets" when the ", "-joined dataset name exceed 200 characters
(
50,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on 50 datasets.",
"<details><summary>train_0</summary>",
"#### train_0",
"</details>\n<details><summary>train_49</summary>",
"#### train_49",
],
),
],
)
def test_model_card_base(
stsb_bert_tiny_model: SentenceTransformer,
dummy_dataset: Dataset,
num_datasets: int,
expected_substrings: list[str],
) -> None:
model = stsb_bert_tiny_model
train_dataset = dummy_dataset
if num_datasets:
train_dataset = DatasetDict({f"train_{i}": train_dataset for i in range(num_datasets)})
# This adds data to model.model_card_data
SentenceTransformerTrainer(
model,
train_dataset=train_dataset,
)
model_card = generate_model_card(model)
# For debugging purposes, we save the model card to a file
# with open(f"test_model_card_{num_datasets}.md", "w", encoding="utf8") as f:
# f.write(model_card)
for substring in expected_substrings:
assert substring in model_card
# We don't want to have two consecutive empty lines anywhere
assert "\n\n\n" not in model_card
|
from __future__ import annotations
import pytest
from datasets import Dataset, DatasetDict
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.model_card import generate_model_card
@pytest.fixture(scope="session")
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor 1", "anchor 2", ..., "anchor 10"],
"positive": ["positive 1", "positive 2", ..., "positive 10"],
"negative": ["negative 1", "negative 2", ..., "negative 10"],
}
"""
return Dataset.from_dict(
{
"anchor": [f"anchor {i}" for i in range(1, 11)],
"positive": [f"positive {i}" for i in range(1, 11)],
"negative": [f"negative {i}" for i in range(1, 11)],
}
)
@pytest.mark.parametrize(
("num_datasets", "expected_substrings"),
[
# 0 actually refers to just a single dataset
(
0,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors).",
"**Maximum Sequence Length:** 512 tokens",
"**Output Dimensionality:** 128 dimensions",
"**Similarity Function:** Cosine Similarity",
"#### Unnamed Dataset",
" | <code>anchor 1</code> | <code>positive 1</code> | <code>negative 1</code> |",
"* Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:",
],
),
(
1,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0 dataset.",
"#### train_0",
],
),
(
2,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0 and train_1 datasets.",
"#### train_0",
"#### train_1",
],
),
(
10,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on the train_0, train_1, train_2, train_3, train_4, train_5, train_6, train_7, train_8 and train_9 datasets.",
"<details><summary>train_0</summary>", # We start using <details><summary> if we have more than 3 datasets
"#### train_0",
"</details>\n<details><summary>train_9</summary>",
"#### train_9",
],
),
# We start using "50 datasets" when the ", "-joined dataset name exceed 200 characters
(
50,
[
"This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers-testing/stsb-bert-tiny-safetensors](https://huggingface.co/sentence-transformers-testing/stsb-bert-tiny-safetensors) on 50 datasets.",
"<details><summary>train_0</summary>",
"#### train_0",
"</details>\n<details><summary>train_49</summary>",
"#### train_49",
],
),
],
)
def test_model_card_base(
stsb_bert_tiny_model: SentenceTransformer,
dummy_dataset: Dataset,
num_datasets: int,
expected_substrings: list[str],
) -> None:
model = stsb_bert_tiny_model
train_dataset = dummy_dataset
if num_datasets:
train_dataset = DatasetDict({f"train_{i}": train_dataset for i in range(num_datasets)})
# This adds data to model.model_card_data
SentenceTransformerTrainer(
model,
train_dataset=train_dataset,
)
model_card = generate_model_card(model)
# For debugging purposes, we save the model card to a file
# with open(f"test_model_card_{num_datasets}.md", "w", encoding="utf8") as f:
# f.write(model_card)
for substring in expected_substrings:
assert substring in model_card
# We don't want to have two consecutive empty lines anywhere
assert "\n\n\n" not in model_card
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDocument):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import ImageTorchTensor, ImageUrl
class MyImageDoc(Document):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from typing import Optional
from typing_extensions import Protocol, runtime_checkable
from torch.distributed._state_dict_utils import _copy_state_dict, _create_cpu_state_dict
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
__all__ = ["AsyncStager", "BlockingAsyncStager"]
@runtime_checkable
class AsyncStager(Protocol):
"""
This protocol is meant to provide customization and extensibility for dcp.async_save, allowing users
    to customize how data is staged prior to executing the usual dcp.save path in parallel.
The expected order of operations (concretely defined in `torch.distributed.state_dict_saver.async_save`)
is the following:
1. AsyncStager.stage_data(state_dict):
This call gives the AsyncStager the opportunity to 'stage'
the state_dict. The expectation and purpose of staging in this context is to create a "training-safe"
representation of the state dict, meaning that any updates to module data after staging is complete
should not be reflected in the state dict returned from this method. For example, in the default
case a copy of the entire state dict is created on CPU RAM and returned here, allowing users
to continue training without risking changes to data which is being serialized.
2. dcp.save is called on the state_dict returned from stage in parallel. This call is responsible
for serializing the state_dict and writing it to storage.
3. If AsyncStager.should_synchronize_after_execute is True, this method will be called immediately after
the serialization thread starts and before returning from dcp.async_save. If this is set to False,
        the assumption is that the user has defined a custom synchronization point for the purpose of further
        optimizing save latency in the training loop (for example, by overlapping staging with the
        forward/backward pass), and it is the responsibility of the user to call `AsyncStager.synchronize_staging`
at the appropriate time.
"""
# default to True since the common case is to stage synchronously
_synchronize_after_execute: bool = True
@property
def should_synchronize_after_execute(self) -> bool:
"""
Whether to synchronize after executing the stage.
"""
return self._synchronize_after_execute
def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
"""
Returns a "staged" copy of `state_dict`. The expectation of the staged copy is that it is
inoculated from any updates incurred after the stage call is complete.
"""
raise NotImplementedError(
f"{self.__class__.__name__} must implement stage method"
)
def synchronize_staging(self) -> None:
"""
In the case `stage` is async in some way, this method should be called to ensure staging
is complete and it is safe to begin modifying the original `state_dict`
"""
class BlockingAsyncStager(AsyncStager):
"""
An implementation of AsyncStager which stages the state_dict on CPU RAM and blocks until the copy is complete.
This implementation also provides an option to optimize stage latency using pinned memory.
N.B. synchronize_staging is a no-op in this case.
"""
    # staging here is blocking, so skip the extra synchronization step after execute
_synchronize_after_execute: bool = False
def __init__(
self,
cache_staged_state_dict: bool = False,
type_check: bool = False,
):
"""
Initializes the BlockingAsyncStager.
Args:
cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency
                at the cost of increased memory usage. Additionally, if this parameter is set to True, it's the expectation
                that the stager is maintained and reused for multiple dcp.async_save calls. Defaults to False.
type_check: Whether to perform a type check during cpu_offload. Defaults to False.
"""
self.cache_staged_state_dict = cache_staged_state_dict
self.type_check = type_check
self.state_dict_cache: Optional[STATE_DICT_TYPE] = None
def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
"""
Returns a copy of `state_dict` on the CPU.
"""
if not self.cache_staged_state_dict:
staged_state_dict = _create_cpu_state_dict(state_dict)
_copy_state_dict(state_dict, staged_state_dict, type_check=self.type_check)
return staged_state_dict
if self.state_dict_cache is None:
self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True)
return _copy_state_dict(state_dict, self.state_dict_cache)
def synchronize_staging(self) -> None:
"""
No-op function, since staging is blocking.
"""
|
from typing import Optional
from typing_extensions import Protocol, runtime_checkable
from torch.distributed._state_dict_utils import _copy_state_dict, _create_cpu_state_dict
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
__all__ = ["AsyncStager", "BlockingAsyncStager"]
@runtime_checkable
class AsyncStager(Protocol):
"""
This protocol is meant to provide customization and extensibility for dcp.async_save, allowing users
    to customize how data is staged prior to executing the usual dcp.save path in parallel.
The expected order of operations (concretely defined in `torch.distributed.state_dict_saver.async_save`)
is the following:
1. AsyncStager.stage_data(state_dict):
This call gives the AsyncStager the opportunity to 'stage'
the state_dict. The expectation and purpose of staging in this context is to create a "training-safe"
representation of the state dict, meaning that any updates to module data after staging is complete
should not be reflected in the state dict returned from this method. For example, in the default
case a copy of the entire state dict is created on CPU RAM and returned here, allowing users
to continue training without risking changes to data which is being serialized.
2. dcp.save is called on the state_dict returned from stage in parallel. This call is responsible
for serializing the state_dict and writing it to storage.
3. If AsyncStager.should_synchronize_after_execute is True, this method will be called immediately after
the serialization thread starts and before returning from dcp.async_save. If this is set to False,
        the assumption is that the user has defined a custom synchronization point for the purpose of further
        optimizing save latency in the training loop (for example, by overlapping staging with the
        forward/backward pass), and it is the responsibility of the user to call `AsyncStager.synchronize_staging`
at the appropriate time.
"""
# default to True since the common case is to stage synchronously
_synchronize_after_execute: bool = True
@property
def should_synchronize_after_execute(self) -> bool:
"""
Whether to synchronize after executing the stage.
"""
return self._synchronize_after_execute
def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
"""
Returns a "staged" copy of `state_dict`. The expectation of the staged copy is that it is
        inoculated from any updates incurred after the stage call is complete.
"""
raise NotImplementedError(
f"{self.__class__.__name__} must implement stage method"
)
def synchronize_staging(self) -> None:
"""
In the case `stage` is async in some way, this method should be called to ensure staging
is complete and it is safe to begin modifying the original `state_dict`
"""
class BlockingAsyncStager(AsyncStager):
"""
An implementation of AsyncStager which stages the state_dict on CPU RAM and blocks until the copy is complete.
This implementation also provides an option to optimize stage latency using pinned memory.
N.B. synchronize_staging is a no-op in this case.
"""
    # staging here is blocking, so skip the extra synchronization step after execute
_synchronize_after_execute: bool = False
def __init__(
self,
cache_staged_state_dict: bool = False,
type_check: bool = False,
):
"""
Initializes the BlockingAsyncStager.
Args:
cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency
                at the cost of increased memory usage. Additionally, if this parameter is set to True, it's the expectation
                that the stager is maintained and reused for multiple dcp.async_save calls. Defaults to False.
type_check: Whether to perform a type check during cpu_offload. Defaults to False.
"""
self.cache_staged_state_dict = cache_staged_state_dict
self.type_check = type_check
self.state_dict_cache: Optional[STATE_DICT_TYPE] = None
def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
"""
Returns a copy of `state_dict` on the CPU.
"""
if not self.cache_staged_state_dict:
staged_state_dict = _create_cpu_state_dict(state_dict)
_copy_state_dict(state_dict, staged_state_dict, type_check=self.type_check)
return staged_state_dict
if self.state_dict_cache is None:
self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True)
return _copy_state_dict(state_dict, self.state_dict_cache)
def synchronize_staging(self) -> None:
"""
No-op function, since staging is blocking.
"""
|
_base_ = 'grounding_dino_swin-t_pretrain_obj365.py'
o365v1_od_dataset = dict(
type='ODVGDataset',
data_root='data/objects365v1/',
ann_file='o365v1_train_odvg.json',
label_map_file='o365v1_label_map.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None,
)
flickr30k_dataset = dict(
type='ODVGDataset',
data_root='data/flickr30k_entities/',
ann_file='final_flickr_separateGT_train_vg.json',
label_map_file=None,
data_prefix=dict(img='flickr30k_images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
gqa_dataset = dict(
type='ODVGDataset',
data_root='data/gqa/',
ann_file='final_mixed_train_no_coco_vg.json',
label_map_file=None,
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
grit_dataset = dict(
type='ODVGDataset',
data_root='grit_processed/',
ann_file='grit20m_vg.json',
label_map_file=None,
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
train_dataloader = dict(
sampler=dict(
_delete_=True,
type='CustomSampleSizeSampler',
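        # assumption: -1 keeps a sub-dataset at its full size, while the final entry caps the
        # number of GRIT samples drawn per epoch at 500k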
dataset_size=[-1, -1, -1, 500000]),
dataset=dict(datasets=[
o365v1_od_dataset, flickr30k_dataset, gqa_dataset, grit_dataset
]))
|
_base_ = 'grounding_dino_swin-t_pretrain_obj365.py'
o365v1_od_dataset = dict(
type='ODVGDataset',
data_root='data/objects365v1/',
ann_file='o365v1_train_odvg.jsonl',
label_map_file='o365v1_label_map.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None,
)
flickr30k_dataset = dict(
type='ODVGDataset',
data_root='data/flickr30k_entities/',
ann_file='final_flickr_separateGT_train_vg.json',
label_map_file=None,
data_prefix=dict(img='flickr30k_images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
gqa_dataset = dict(
type='ODVGDataset',
data_root='data/gqa/',
ann_file='final_mixed_train_no_coco_vg.json',
label_map_file=None,
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
grit_dataset = dict(
type='ODVGDataset',
data_root='grit_processed/',
ann_file='grit20m_vg.json',
label_map_file=None,
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
train_dataloader = dict(
sampler=dict(
_delete_=True,
type='CustomSampleSizeSampler',
dataset_size=[-1, -1, -1, 500000]),
dataset=dict(datasets=[
o365v1_od_dataset, flickr30k_dataset, gqa_dataset, grit_dataset
]))
|
from abc import ABC
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
self._load_offset2ids()
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from ....math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
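# A hypothetical sketch added for illustration (not part of the original module) of how a concrete
# backend mixin might populate `TYPE_MAP` with `TypeMap` entries; the column types and converters
# below are made up.
class _SQLiteLikeBackendMixin(BaseBackendMixin):
    TYPE_MAP = {
        'str': TypeMap(type='TEXT', converter=str),
        'float': TypeMap(type='REAL', converter=float),
        'int': TypeMap(type='INTEGER', converter=int),
    }
# With such a map, `_map_type('float')` resolves to 'REAL' and `_map_column('3', 'int')` returns 3.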
|
from abc import ABC
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
class BaseBackendMixin(ABC):
TYPE_MAP: Dict
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
self._load_offset2ids()
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from ....math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type]
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it could be one of the following:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When using it under Python, one can additionally use the following values:
        - a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
gp.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
help='The port for input data to bind to, default a random port between [49152, 65535]',
)
gp.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for binding to, by default it is {__default_host__}',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
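# --- Hedged usage sketch (not part of the original module) ---
# Shows how the mixin above can be applied to a plain argparse parser; the
# executor name passed on the command line is an arbitrary example.
if __name__ == '__main__':
    from argparse import ArgumentParser

    parser = ArgumentParser(description='WorkerRuntime demo parser')
    mixin_worker_runtime_parser(parser)
    args = parser.parse_args(['--uses', 'MyExecutor', '--native'])
    print(args.uses, args.port, args.native)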
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it can be one of the following:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When using it under Python, one can additionally use the following values:
        - a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
''',
)
gp.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
help='The port for input data to bind to, default a random port between [49152, 65535]',
)
gp.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for binding to, by default it is {__default_host__}',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
|
from typing import TYPE_CHECKING, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
T = TypeVar('T', bound='ID')
class ID(str, AbstractType):
"""
    Represent a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert an ID into a NodeProto message. This function should
        be called when `self` is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(id=self)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
        Read an ID from a proto msg
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
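# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how the validator above normalises str, int and UUID inputs;
# the concrete values are arbitrary examples.
if __name__ == '__main__':
    from uuid import uuid4

    assert parse_obj_as(ID, 123) == '123'  # ints are stringified
    assert isinstance(parse_obj_as(ID, uuid4()), ID)  # UUIDs are accepted too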
|
from typing import Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
T = TypeVar('T', bound='ID')
class ID(str, AbstractType):
"""
    Represent a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> NodeProto:
"""Convert an ID into a NodeProto message. This function should
        be called when `self` is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(id=self)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
        Read an ID from a proto msg
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, but it seems to have no effect; the EXPORT may still need to be done manually before starting jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.25.3'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn on
    parallel-executing plot generators against the Ubuntu default ulimit -n of 1024 or OS X El Capitan's 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
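# --- Hedged usage sketch (not part of the original module) ---
# The first-class symbols re-exported above are typically combined like this;
# the Executor below and the greeting text are hypothetical examples.
if __name__ == '__main__':

    class _GreetExecutor(Executor):
        @requests
        def greet(self, docs: DocumentArray, **kwargs):
            for d in docs:
                d.text = 'hello'

    with Flow().add(uses=_GreetExecutor) as f:
        replies = f.post(on='/', inputs=Document())
        print(replies[0].text)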
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, but it seems to have no effect; the EXPORT may still need to be done manually before starting jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.25.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn on
    parallel-executing plot generators against the Ubuntu default ulimit -n of 1024 or OS X El Capitan's 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))
# `lr` and `weight_decay` have been searched to be optimal.
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.1),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))
# `lr` and `weight_decay` have been searched to be optimal.
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
weight_decay=0.1,
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocList
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocLists in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocList names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocList object at the specified name
:param name: The name of the DocList to delete
:param missing_ok: If true, no error will be raised if the DocList does not exist.
:return: True if the DocList was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
docs: 'DocList',
name: str,
show_progress: bool,
) -> Dict:
"""Push this DocList to the specified name.
:param docs: The DocList to push
:param name: The name to push to
:param show_progress: If true, a progress bar will be displayed.
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
show_progress: bool = False,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param show_progress: If true, a progress bar will be displayed.
"""
...
@staticmethod
@abstractmethod
def pull(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocList':
"""Pull a DocList from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: A DocList
"""
...
@staticmethod
@abstractmethod
def pull_stream(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: An iterator of documents"""
...
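# --- Hedged sketch of a concrete store (not part of the original module) ---
# A toy in-memory backend that satisfies the interface above, only to show how
# the abstract static methods fit together; every detail here is hypothetical.
class InMemoryDocStore(AbstractDocStore):
    _store: Dict[str, list] = {}

    @staticmethod
    def list(namespace: str, show_table: bool) -> List[str]:
        return [n for n in InMemoryDocStore._store if n.startswith(namespace)]

    @staticmethod
    def delete(name: str, missing_ok: bool) -> bool:
        if name not in InMemoryDocStore._store:
            if missing_ok:
                return False
            raise KeyError(name)
        del InMemoryDocStore._store[name]
        return True

    @staticmethod
    def push(docs, name: str, show_progress: bool) -> Dict:
        InMemoryDocStore._store[name] = list(docs)
        return {'name': name, 'count': len(InMemoryDocStore._store[name])}

    @staticmethod
    def push_stream(docs, url: str, show_progress: bool = False) -> Dict:
        return InMemoryDocStore.push(docs, url, show_progress)

    @staticmethod
    def pull(docs_cls, name: str, show_progress: bool, local_cache: bool):
        return docs_cls(InMemoryDocStore._store.get(name, []))

    @staticmethod
    def pull_stream(docs_cls, name: str, show_progress: bool, local_cache: bool):
        yield from InMemoryDocStore._store.get(name, [])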
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocList
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocLists in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocList names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocList object at the specified name
:param name: The name of the DocList to delete
:param missing_ok: If true, no error will be raised if the DocList does not exist.
:return: True if the DocList was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
docs: 'DocList',
name: str,
show_progress: bool,
) -> Dict:
"""Push this DocList to the specified name.
:param docs: The DocList to push
:param name: The name to push to
:param show_progress: If true, a progress bar will be displayed.
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
show_progress: bool = False,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param show_progress: If true, a progress bar will be displayed.
"""
...
@staticmethod
@abstractmethod
def pull(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocList':
"""Pull a DocList from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: A DocList
"""
...
@staticmethod
@abstractmethod
def pull_stream(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: An iterator of documents"""
...
|
import os
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDocument):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocumentArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocumentArray[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocumentArray[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocumentArray.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocumentArray[nested_doc.__class__].from_csv(
file_path=str(TOYDATA_DIR / 'docs.csv')
)
|
import os
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDocument):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: Image
image2: Image
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocumentArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=Image(url='aux.png'),
image2=Image(url='aux.png'),
),
nested_doc_cls(text='hello world', image=Image(), image2=Image()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocumentArray[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocumentArray[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == Image
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes is None
assert doc.image2.__class__ == Image
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[Image]
class Middle(BaseDocument):
img: Optional[Image]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[Image]
middle: Optional[Middle]
doc = Outer(img=Image(), middle=Middle(img=Image(), inner=Inner(img=Image())))
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocumentArray.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocumentArray[nested_doc.__class__].from_csv(
file_path=str(TOYDATA_DIR / 'docs.csv')
)
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""
The metric for the contrastive loss
"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1-F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
:param model: SentenceTransformer model
    :param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
:param size_average: Average by the size of the mini-batch.
Example::
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('all-MiniLM-L6-v2')
train_examples = [
InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0)]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.ContrastiveLoss(model=model)
model.fit([(train_dataloader, train_loss)], show_progress_bar=True)
"""
def __init__(self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5, size_average:bool = True):
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {'distance_metric': distance_metric_name, 'margin': self.margin, 'size_average': self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2))
return losses.mean() if self.size_average else losses.sum()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.device import (get_device, is_cuda_available, is_mlu_available,
is_mps_available, is_npu_available)
def test_get_device():
device = get_device()
if is_npu_available():
assert device == 'npu'
elif is_cuda_available():
assert device == 'cuda'
elif is_mlu_available():
assert device == 'mlu'
elif is_mps_available():
assert device == 'mps'
else:
assert device == 'cpu'
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.device import (get_device, is_cuda_available, is_mlu_available,
is_mps_available)
def test_get_device():
device = get_device()
if is_cuda_available():
assert device == 'cuda'
elif is_mlu_available():
assert device == 'mlu'
elif is_mps_available():
assert device == 'mps'
else:
assert device == 'cpu'
|
import logging
import re
from typing import Any
import uvicorn.config
from colorama import Fore
def remove_color_codes(s: str) -> str:
return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s)
def fmt_kwargs(kwargs: dict) -> str:
return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())
def print_attribute(
title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
logger = logging.getLogger()
logger.info(
str(value),
extra={
"title": f"{title.rstrip(':')}:",
"title_color": title_color,
"color": value_color,
},
)
def generate_uvicorn_config():
"""
Generates a uvicorn logging config that silences uvicorn's default logging and tells it to use the native logging module.
"""
log_config = dict(uvicorn.config.LOGGING_CONFIG)
log_config["loggers"]["uvicorn"] = {"handlers": []}
log_config["loggers"]["uvicorn.error"] = {"handlers": []}
log_config["loggers"]["uvicorn.access"] = {"handlers": []}
return log_config
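# --- Hedged usage sketch (not part of the original module) ---
# Shows how the config above would typically be handed to uvicorn so that the
# application's own logging handlers take over; the ASGI app path is a
# hypothetical example.
if __name__ == '__main__':
    import uvicorn

    uvicorn.run('my_app:app', log_config=generate_uvicorn_config())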
|
import logging
import re
from typing import Any
from colorama import Fore
def remove_color_codes(s: str) -> str:
return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s)
def fmt_kwargs(kwargs: dict) -> str:
return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())
def print_attribute(
title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
logger = logging.getLogger()
logger.info(
str(value),
extra={
"title": f"{title.rstrip(':')}:",
"title_color": title_color,
"color": value_color,
},
)
|
from __future__ import annotations
import pytest
from torch import Tensor
from sentence_transformers import SparseEncoder
@pytest.mark.parametrize(
"model_name",
[
("sentence-transformers/all-MiniLM-L6-v2"),
],
)
def test_load_and_encode(model_name: str) -> None:
# Ensure that SparseEncoder can be initialized with a base model and can encode
try:
model = SparseEncoder(model_name)
except Exception as e:
pytest.fail(f"Failed to load SparseEncoder with {model_name}: {e}")
sentences = [
"This is a test sentence.",
"Another example sentence here.",
"Sparse encoders are interesting.",
]
try:
embeddings = model.encode(sentences)
except Exception as e:
pytest.fail(f"SparseEncoder failed to encode sentences: {e}")
assert embeddings is not None
assert isinstance(embeddings, Tensor), "Embeddings should be a tensor for sparse encoders"
assert len(embeddings) == len(sentences), "Number of embeddings should match number of sentences"
decoded_embeddings = model.decode(embeddings)
assert len(decoded_embeddings) == len(sentences), "Decoded embeddings should match number of sentences"
assert all(isinstance(emb, list) for emb in decoded_embeddings), "Decoded embeddings should be a list of lists"
# Check a known property: encoding a single sentence
single_sentence_emb = model.encode(["A single sentence."], convert_to_tensor=False)
assert isinstance(
single_sentence_emb, list
), "Encoding a single sentence with convert_to_tensor=False should return a list of len 1"
    assert len(single_sentence_emb) == 1, "Encoding a one-sentence list should return exactly one embedding"
# If we're using a string instead of a list, we should get a single tensor embedding
single_sentence_emb_tensor = model.encode("A single sentence.", convert_to_tensor=False)
assert isinstance(
single_sentence_emb_tensor, Tensor
), "Encoding a single sentence with convert_to_tensor=False should return a tensor"
assert single_sentence_emb_tensor.dim() == 1, "Single sentence embedding tensor should be 1D"
# Check encoding with show_progress_bar
try:
embeddings_with_progress = model.encode(sentences, show_progress_bar=True)
assert len(embeddings_with_progress) == len(sentences)
except Exception as e:
pytest.fail(f"SparseEncoder failed to encode with progress bar: {e}")
|
from __future__ import annotations
import pytest
from torch import Tensor
from sentence_transformers import SparseEncoder
@pytest.mark.parametrize(
"model_name",
[
("sentence-transformers/all-MiniLM-L6-v2"),
],
)
def test_load_and_encode(model_name: str) -> None:
# Ensure that SparseEncoder can be initialized with a base model and can encode
try:
model = SparseEncoder(model_name)
except Exception as e:
pytest.fail(f"Failed to load SparseEncoder with {model_name}: {e}")
sentences = [
"This is a test sentence.",
"Another example sentence here.",
"Sparse encoders are interesting.",
]
try:
embeddings = model.encode(sentences)
except Exception as e:
pytest.fail(f"SparseEncoder failed to encode sentences: {e}")
assert embeddings is not None
assert isinstance(embeddings, Tensor), "Embeddings should be a tensor for sparse encoders"
assert len(embeddings) == len(sentences), "Number of embeddings should match number of sentences"
decoded_embeddings = model.decode(embeddings)
assert len(decoded_embeddings) == len(sentences), "Decoded embeddings should match number of sentences"
assert all(isinstance(emb, list) for emb in decoded_embeddings), "Decoded embeddings should be a list of lists"
# Check a known property: encoding a single sentence
single_sentence_emb = model.encode("A single sentence.", convert_to_tensor=False)
assert isinstance(
single_sentence_emb, list
), "Encoding a single sentence with convert_to_tensor=False should return a list of len 1"
assert len(single_sentence_emb) > 0, "Single sentence embedding dict should not be empty"
# Check encoding with show_progress_bar
try:
embeddings_with_progress = model.encode(sentences, show_progress_bar=True)
assert len(embeddings_with_progress) == len(sentences)
except Exception as e:
pytest.fail(f"SparseEncoder failed to encode with progress bar: {e}")
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to evaluate accuracy of TFLite flatbuffer model on mnist dataset."""
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.lite.python import lite
from tensorflow.lite.tutorials import dataset
flags = tf.app.flags
flags.DEFINE_string('data_dir', '/tmp/data_dir',
'Directory where data is stored.')
flags.DEFINE_string('model_file', '',
'The path to the TFLite flatbuffer model file.')
flags = flags.FLAGS
def test_image_generator():
# Generates an iterator over images
with tf.compat.v1.Session() as sess:
input_data = tf.compat.v1.data.make_one_shot_iterator(dataset.test(
flags.data_dir)).get_next()
try:
while True:
yield sess.run(input_data)
except tf.errors.OutOfRangeError:
pass
def run_eval(interpreter, input_image):
"""Performs evaluation for input image over specified model.
Args:
interpreter: TFLite interpreter initialized with model to execute.
input_image: Image input to the model.
Returns:
output: output tensor of model being executed.
"""
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test model on the input images.
input_image = np.reshape(input_image, input_details[0]['shape'])
interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
output = np.squeeze(output_data)
return output
def main(_):
interpreter = lite.Interpreter(model_path=flags.model_file)
interpreter.allocate_tensors()
num_correct, total = 0, 0
for input_data in test_image_generator():
output = run_eval(interpreter, input_data[0])
total += 1
if output == input_data[1]:
num_correct += 1
if total % 500 == 0:
print('Accuracy after %i images: %f' %
(total, float(num_correct) / float(total)))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.app.run(main)
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to evaluate accuracy of TFLite flatbuffer model on mnist dataset."""
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.lite.tutorials import dataset
flags = tf.app.flags
flags.DEFINE_string('data_dir', '/tmp/data_dir',
'Directory where data is stored.')
flags.DEFINE_string('model_file', '',
'The path to the TFLite flatbuffer model file.')
flags = flags.FLAGS
def test_image_generator():
# Generates an iterator over images
with tf.compat.v1.Session() as sess:
input_data = tf.compat.v1.data.make_one_shot_iterator(dataset.test(
flags.data_dir)).get_next()
try:
while True:
yield sess.run(input_data)
except tf.errors.OutOfRangeError:
pass
def run_eval(interpreter, input_image):
"""Performs evaluation for input image over specified model.
Args:
interpreter: TFLite interpreter initialized with model to execute.
input_image: Image input to the model.
Returns:
output: output tensor of model being executed.
"""
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test model on the input images.
input_image = np.reshape(input_image, input_details[0]['shape'])
interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
output = np.squeeze(output_data)
return output
def main(_):
interpreter = tf.lite.Interpreter(model_path=flags.model_file)
interpreter.allocate_tensors()
num_correct, total = 0, 0
for input_data in test_image_generator():
output = run_eval(interpreter, input_data[0])
total += 1
if output == input_data[1]:
num_correct += 1
if total % 500 == 0:
print('Accuracy after %i images: %f' %
(total, float(num_correct) / float(total)))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.app.run(main)
|
from typing import Any, Union, Optional
from vertexai.generative_models._generative_models import SafetySettingsType
from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types
from llama_index.core.llms import ChatMessage, MessageRole, ImageBlock, TextBlock
def is_gemini_model(model: str) -> bool:
return model.startswith("gemini")
def create_gemini_client(
model: str, safety_settings: Optional[SafetySettingsType]
) -> Any:
from vertexai.preview.generative_models import GenerativeModel
return GenerativeModel(model_name=model, safety_settings=safety_settings)
def convert_chat_message_to_gemini_content(
message: ChatMessage, is_history: bool = True
) -> Any:
from vertexai.preview.generative_models import Content, Part
def _convert_block_to_part(block: Union[TextBlock, ImageBlock]) -> Optional[Part]:
from vertexai.preview.generative_models import Image
if isinstance(block, TextBlock):
if block.text:
return Part.from_text(block.text)
else:
return None
elif isinstance(block, ImageBlock):
if block.path:
image = Image.load_from_file(block.path)
elif block.image:
image = Image.from_bytes(block.image)
elif block.url:
base64_bytes = block.resolve_image(as_base64=False).read()
image = Image.from_bytes(base64_bytes)
else:
raise ValueError("ImageBlock must have either path, url, or image data")
return Part.from_image(image)
else:
raise ValueError(f"Unsupported block type: {type(block).__name__}")
if (
message.content == "" or message.content is None
) and "tool_calls" in message.additional_kwargs:
tool_calls = message.additional_kwargs["tool_calls"]
parts = [
Part._from_gapic(raw_part=gapic_content_types.Part(function_call=tool_call))
for tool_call in tool_calls
]
else:
if not isinstance(message.blocks, list):
raise ValueError("message.blocks must be a list of content blocks")
parts = [
part
for part in (_convert_block_to_part(block) for block in message.blocks)
if part is not None
]
if is_history:
return Content(
role="user" if message.role == MessageRole.USER else "model",
parts=parts,
)
else:
return parts
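# --- Hedged usage sketch (not part of the original module) ---
# Illustrates converting a plain-text ChatMessage into a Gemini `Content`
# object; it requires the vertexai SDK at runtime, and the message text is an
# arbitrary example.
if __name__ == '__main__':
    msg = ChatMessage(role=MessageRole.USER, content='Describe this image.')
    content = convert_chat_message_to_gemini_content(msg, is_history=True)
    print(content.role, len(content.parts))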
|
import base64
from typing import Any, Dict, Union, Optional
from vertexai.generative_models._generative_models import SafetySettingsType
from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types
from llama_index.core.llms import ChatMessage, MessageRole
def is_gemini_model(model: str) -> bool:
return model.startswith("gemini")
def create_gemini_client(
model: str, safety_settings: Optional[SafetySettingsType]
) -> Any:
from vertexai.preview.generative_models import GenerativeModel
return GenerativeModel(model_name=model, safety_settings=safety_settings)
def convert_chat_message_to_gemini_content(
message: ChatMessage, is_history: bool = True
) -> Any:
from vertexai.preview.generative_models import Content, Part
def _convert_gemini_part_to_prompt(part: Union[str, Dict]) -> Part:
from vertexai.preview.generative_models import Image, Part
if isinstance(part, str):
return Part.from_text(part)
if not isinstance(part, Dict):
raise ValueError(
f"Message's content is expected to be a dict, got {type(part)}!"
)
if part["type"] == "text":
return Part.from_text(part["text"])
elif part["type"] == "image_url":
path = part["image_url"]
if path.startswith("gs://"):
raise ValueError("Only local image path is supported!")
elif path.startswith("data:image/jpeg;base64,"):
image = Image.from_bytes(base64.b64decode(path[23:]))
else:
image = Image.load_from_file(path)
else:
raise ValueError("Only text and image_url types are supported!")
return Part.from_image(image)
if (
message.content == "" or message.content is None
) and "tool_calls" in message.additional_kwargs:
tool_calls = message.additional_kwargs["tool_calls"]
parts = [
Part._from_gapic(raw_part=gapic_content_types.Part(function_call=tool_call))
for tool_call in tool_calls
]
else:
raw_content = message.content
if raw_content is None:
raw_content = ""
if isinstance(raw_content, str):
raw_content = [raw_content]
parts = [_convert_gemini_part_to_prompt(part) for part in raw_content]
if is_history:
return Content(
role="user" if message.role == MessageRole.USER else "model",
parts=parts,
)
else:
return parts
|
"""Test LLMSummarization functionality."""
import pytest
from langchain.chains.llm_summarization_checker.base import (
ARE_ALL_TRUE_PROMPT,
CHECK_ASSERTIONS_PROMPT,
CREATE_ASSERTIONS_PROMPT,
REVISED_SUMMARY_PROMPT,
LLMSummarizationCheckerChain,
)
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_input_variables() -> None:
assert CREATE_ASSERTIONS_PROMPT.input_variables == ["summary"]
assert CHECK_ASSERTIONS_PROMPT.input_variables == ["assertions"]
assert REVISED_SUMMARY_PROMPT.input_variables == ["checked_assertions", "summary"]
assert ARE_ALL_TRUE_PROMPT.input_variables == ["checked_assertions"]
@pytest.fixture
def fake_llm_summarization_checker_chain() -> LLMSummarizationCheckerChain:
"""Fake LLMCheckerChain for testing."""
queries = {
CREATE_ASSERTIONS_PROMPT.format(
summary="a",
): "b",
CHECK_ASSERTIONS_PROMPT.format(
assertions="b",
): "- b - True",
REVISED_SUMMARY_PROMPT.format(
checked_assertions="- b - True", summary="a"
): "b",
ARE_ALL_TRUE_PROMPT.format(
checked_assertions="- b - True",
): "True",
}
fake_llm = FakeLLM(queries=queries)
return LLMSummarizationCheckerChain.from_llm(
fake_llm, input_key="q", output_key="a"
)
def test_simple_text(
fake_llm_summarization_checker_chain: LLMSummarizationCheckerChain,
) -> None:
"""Test simple question that should not need python."""
question = "a"
output = fake_llm_summarization_checker_chain.run(question)
assert output == "b"
|
# flake8: noqa E501
"""Test LLMSummarization functionality."""
import pytest
from langchain.chains.llm_summarization_checker.base import (
ARE_ALL_TRUE_PROMPT,
CHECK_ASSERTIONS_PROMPT,
CREATE_ASSERTIONS_PROMPT,
REVISED_SUMMARY_PROMPT,
LLMSummarizationCheckerChain,
)
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_input_variables() -> None:
assert CREATE_ASSERTIONS_PROMPT.input_variables == ["summary"]
assert CHECK_ASSERTIONS_PROMPT.input_variables == ["assertions"]
assert REVISED_SUMMARY_PROMPT.input_variables == ["checked_assertions", "summary"]
assert ARE_ALL_TRUE_PROMPT.input_variables == ["checked_assertions"]
@pytest.fixture
def fake_llm_summarization_checker_chain() -> LLMSummarizationCheckerChain:
"""Fake LLMCheckerChain for testing."""
queries = {
CREATE_ASSERTIONS_PROMPT.format(
summary="a",
): "b",
CHECK_ASSERTIONS_PROMPT.format(
assertions="b",
): "- b - True",
REVISED_SUMMARY_PROMPT.format(
checked_assertions="- b - True", summary="a"
): "b",
ARE_ALL_TRUE_PROMPT.format(
checked_assertions="- b - True",
): "True",
}
fake_llm = FakeLLM(queries=queries)
return LLMSummarizationCheckerChain.from_llm(
fake_llm, input_key="q", output_key="a"
)
def test_simple_text(
fake_llm_summarization_checker_chain: LLMSummarizationCheckerChain,
) -> None:
"""Test simple question that should not need python."""
question = "a"
output = fake_llm_summarization_checker_chain.run(question)
assert output == "b"
|
_base_ = './htc-without-semantic_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_seg=True),
roi_head=dict(
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
seg_scale_factor=1 / 8,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
dataset=dict(
data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
pipeline=train_pipeline))
|
_base_ = './htc-without-semantic_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_seg=True),
roi_head=dict(
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
seg_scale_factor=1 / 8,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
dataset=dict(
data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
pipeline=train_pipeline))
|
from langchain.indexes import __all__
def test_all() -> None:
"""Use to catch obvious breaking changes."""
expected = [
"aindex",
"GraphIndexCreator",
"index",
"IndexingResult",
"SQLRecordManager",
"VectorstoreIndexCreator",
]
assert sorted(__all__) == sorted(expected)
|
from langchain.indexes import __all__
def test_all() -> None:
"""Use to catch obvious breaking changes."""
expected = [
"aindex",
"GraphIndexCreator",
"index",
"IndexingResult",
"SQLRecordManager",
"VectorstoreIndexCreator",
]
assert __all__ == sorted(expected, key=lambda x: x.lower())
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GoogleSerperResults, GoogleSerperRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleSerperRun": "langchain_community.tools",
"GoogleSerperResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleSerperResults",
"GoogleSerperRun",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GoogleSerperResults, GoogleSerperRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleSerperRun": "langchain_community.tools",
"GoogleSerperResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleSerperRun",
"GoogleSerperResults",
]
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparse_sigmoid
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
sparse_plus,
silu,
gelu,
glu,
tanh,
tanh_shrink,
threshold,
sigmoid,
sparse_sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
sparsemax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = serialization_lib.deserialize_keras_object(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
sparse_plus,
silu,
gelu,
glu,
tanh,
tanh_shrink,
threshold,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
sparsemax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = serialization_lib.deserialize_keras_object(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import asyncio
import pytest
from grpc import ChannelConnectivity
from jina.serve.networking.connection_stub import _ConnectionStubs
from jina.serve.networking.instrumentation import _NetworkingHistograms
from jina.serve.networking.replica_list import _ReplicaList
@pytest.fixture()
def replica_list(logger, metrics):
return _ReplicaList(
metrics=metrics,
histograms=_NetworkingHistograms(),
logger=logger,
runtime_name='test',
)
def test_add_connection(replica_list):
replica_list.add_connection('executor0', 'executor-0')
assert replica_list.has_connections()
assert replica_list.has_connection('executor0')
assert not replica_list.has_connection('random-address')
assert len(replica_list.get_all_connections()) == 1
@pytest.mark.asyncio
async def test_remove_connection(replica_list):
replica_list.add_connection('executor0', 'executor-0')
assert replica_list.has_connections()
await replica_list.remove_connection('executor0')
assert not replica_list.has_connections()
assert not replica_list.has_connection('executor0')
# unknown/unmanaged connections
removed_connection_invalid = await replica_list.remove_connection('random-address')
assert removed_connection_invalid is None
assert len(replica_list.get_all_connections()) == 0
@pytest.mark.asyncio
async def test_reset_connection(replica_list):
replica_list.add_connection('executor0', 'executor-0')
connection_stub = await replica_list.get_next_connection('executor0')
await replica_list.reset_connection('executor0', 'executor-0')
new_connection_stub = await replica_list.get_next_connection()
assert len(replica_list.get_all_connections()) == 1
connection_stub_random_address = await replica_list.reset_connection(
'random-address', '0'
)
assert connection_stub_random_address is None
@pytest.mark.asyncio
async def test_close(replica_list):
replica_list.add_connection('executor0', 'executor-0')
replica_list.add_connection('executor1', 'executor-0')
assert replica_list.has_connection('executor0')
assert replica_list.has_connection('executor1')
await replica_list.close()
assert not replica_list.has_connections()
async def _print_channel_attributes(connection_stub: _ConnectionStubs):
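    # Despite its name, this helper only waits briefly and asserts that the
    # channel has not been shut down by a concurrent reset.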
await asyncio.sleep(0.5)
assert connection_stub.channel.get_state() != ChannelConnectivity.SHUTDOWN
@pytest.mark.asyncio
async def test_synchronization_when_resetting_connection(replica_list, logger):
replica_list.add_connection('executor0', 'executor-0')
connection_stub = await replica_list.get_next_connection(num_retries=0)
responses = await asyncio.gather(
asyncio.create_task(_print_channel_attributes(connection_stub)),
asyncio.create_task(
replica_list.reset_connection(
address='executor0', deployment_name='executor-0'
)
),
return_exceptions=True,
)
assert not any(
[issubclass(type(response), BaseException) for response in responses]
)
|
import asyncio
import pytest
from grpc import ChannelConnectivity
from jina.serve.networking.connection_stub import _ConnectionStubs
from jina.serve.networking.instrumentation import _NetworkingHistograms
from jina.serve.networking.replica_list import _ReplicaList
@pytest.fixture()
def replica_list(logger, metrics):
return _ReplicaList(
metrics=metrics,
histograms=_NetworkingHistograms(),
logger=logger,
runtime_name='test',
)
def test_add_connection(replica_list):
replica_list.add_connection('executor0', 'executor-0')
assert replica_list.has_connections()
assert replica_list.has_connection('executor0')
assert len(replica_list.warmup_stubs)
assert not replica_list.has_connection('random-address')
assert len(replica_list.get_all_connections()) == 1
@pytest.mark.asyncio
async def test_remove_connection(replica_list):
replica_list.add_connection('executor0', 'executor-0')
assert replica_list.has_connections()
await replica_list.remove_connection('executor0')
assert not replica_list.has_connections()
assert not replica_list.has_connection('executor0')
# warmup stubs are not updated in the remove_connection method
assert len(replica_list.warmup_stubs)
# unknown/unmanaged connections
removed_connection_invalid = await replica_list.remove_connection('random-address')
assert removed_connection_invalid is None
assert len(replica_list.get_all_connections()) == 0
@pytest.mark.asyncio
async def test_reset_connection(replica_list):
replica_list.add_connection('executor0', 'executor-0')
connection_stub = await replica_list.get_next_connection('executor0')
await replica_list.reset_connection('executor0', 'executor-0')
new_connection_stub = await replica_list.get_next_connection()
assert len(replica_list.get_all_connections()) == 1
connection_stub_random_address = await replica_list.reset_connection(
'random-address', '0'
)
assert connection_stub_random_address is None
@pytest.mark.asyncio
async def test_close(replica_list):
replica_list.add_connection('executor0', 'executor-0')
replica_list.add_connection('executor1', 'executor-0')
assert replica_list.has_connection('executor0')
assert replica_list.has_connection('executor1')
await replica_list.close()
assert not replica_list.has_connections()
assert not len(replica_list.warmup_stubs)
async def _print_channel_attributes(connection_stub: _ConnectionStubs):
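    # Despite its name, this helper only waits briefly and asserts that the
    # channel has not been shut down by a concurrent reset.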
await asyncio.sleep(0.5)
assert connection_stub.channel.get_state() != ChannelConnectivity.SHUTDOWN
@pytest.mark.asyncio
async def test_synchronization_when_resetting_connection(replica_list, logger):
replica_list.add_connection('executor0', 'executor-0')
connection_stub = await replica_list.get_next_connection(num_retries=0)
responses = await asyncio.gather(
asyncio.create_task(_print_channel_attributes(connection_stub)),
asyncio.create_task(
replica_list.reset_connection(
address='executor0', deployment_name='executor-0'
)
),
return_exceptions=True,
)
assert not any(
[issubclass(type(response), BaseException) for response in responses]
)
|
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
pytestmark = pytest.mark.skipif(**tm.no_pandas())
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestTreesToDataFrame:
def build_model(self, max_depth, num_round):
dtrain, _ = tm.load_agaricus(__file__)
param = {'max_depth': max_depth, 'objective': 'binary:logistic',
'verbosity': 1}
num_round = num_round
bst = xgb.train(param, dtrain, num_round)
return bst
def parse_dumped_model(self, booster, item_to_get, splitter):
item_to_get += '='
txt_dump = booster.get_dump(with_stats=True)
tree_list = [tree.split('/n') for tree in txt_dump]
split_trees = [tree[0].split(item_to_get)[1:] for tree in tree_list]
res = sum([float(line.split(splitter)[0])
for tree in split_trees for line in tree])
return res
def test_trees_to_dataframe(self):
bst = self.build_model(max_depth=5, num_round=10)
gain_from_dump = self.parse_dumped_model(booster=bst,
item_to_get='gain',
splitter=',')
cover_from_dump = self.parse_dumped_model(booster=bst,
item_to_get='cover',
splitter='\n')
# method being tested
df = bst.trees_to_dataframe()
# test for equality of gains
gain_from_df = df[df.Feature != 'Leaf'][['Gain']].sum()
assert np.allclose(gain_from_dump, gain_from_df)
# test for equality of covers
cover_from_df = df.Cover.sum()
assert np.allclose(cover_from_dump, cover_from_df)
def run_tree_to_df_categorical(self, tree_method: str) -> None:
X, y = tm.make_categorical(100, 10, 31, onehot=False)
Xy = xgb.DMatrix(X, y, enable_categorical=True)
booster = xgb.train({"tree_method": tree_method}, Xy, num_boost_round=10)
df = booster.trees_to_dataframe()
for _, x in df.iterrows():
if x["Feature"] != "Leaf":
assert len(x["Category"]) >= 1
def test_tree_to_df_categorical(self) -> None:
self.run_tree_to_df_categorical("approx")
def run_split_value_histograms(self, tree_method) -> None:
X, y = tm.make_categorical(1000, 10, 13, onehot=False)
reg = xgb.XGBRegressor(tree_method=tree_method, enable_categorical=True)
reg.fit(X, y)
with pytest.raises(ValueError, match="doesn't"):
reg.get_booster().get_split_value_histogram("3", bins=5)
def test_split_value_histograms(self):
self.run_split_value_histograms("approx")
|
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
pytestmark = pytest.mark.skipif(**tm.no_pandas())
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestTreesToDataFrame:
def build_model(self, max_depth, num_round):
dtrain, _ = tm.load_agaricus(__file__)
param = {'max_depth': max_depth, 'objective': 'binary:logistic',
'verbosity': 1}
num_round = num_round
bst = xgb.train(param, dtrain, num_round)
return bst
def parse_dumped_model(self, booster, item_to_get, splitter):
item_to_get += '='
txt_dump = booster.get_dump(with_stats=True)
tree_list = [tree.split('/n') for tree in txt_dump]
split_trees = [tree[0].split(item_to_get)[1:] for tree in tree_list]
res = sum([float(line.split(splitter)[0])
for tree in split_trees for line in tree])
return res
def test_trees_to_dataframe(self):
bst = self.build_model(max_depth=5, num_round=10)
gain_from_dump = self.parse_dumped_model(booster=bst,
item_to_get='gain',
splitter=',')
cover_from_dump = self.parse_dumped_model(booster=bst,
item_to_get='cover',
splitter='\n')
# method being tested
df = bst.trees_to_dataframe()
# test for equality of gains
gain_from_df = df[df.Feature != 'Leaf'][['Gain']].sum()
assert np.allclose(gain_from_dump, gain_from_df)
# test for equality of covers
cover_from_df = df.Cover.sum()
assert np.allclose(cover_from_dump, cover_from_df)
def run_tree_to_df_categorical(self, tree_method: str) -> None:
X, y = tm.make_categorical(100, 10, 31, False)
Xy = xgb.DMatrix(X, y, enable_categorical=True)
booster = xgb.train({"tree_method": tree_method}, Xy, num_boost_round=10)
df = booster.trees_to_dataframe()
for _, x in df.iterrows():
if x["Feature"] != "Leaf":
assert len(x["Category"]) >= 1
def test_tree_to_df_categorical(self) -> None:
self.run_tree_to_df_categorical("approx")
def run_split_value_histograms(self, tree_method) -> None:
X, y = tm.make_categorical(1000, 10, 13, False)
reg = xgb.XGBRegressor(tree_method=tree_method, enable_categorical=True)
reg.fit(X, y)
with pytest.raises(ValueError, match="doesn't"):
reg.get_booster().get_split_value_histogram("3", bins=5)
def test_split_value_histograms(self):
self.run_split_value_histograms("approx")
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocList
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocLists in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocList names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocList object at the specified name
:param name: The name of the DocList to delete
:param missing_ok: If true, no error will be raised if the DocList does not exist.
:return: True if the DocList was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
docs: 'DocList',
name: str,
show_progress: bool,
) -> Dict:
"""Push this DocList to the specified name.
:param docs: The DocList to push
:param name: The name to push to
:param show_progress: If true, a progress bar will be displayed.
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
show_progress: bool = False,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param show_progress: If true, a progress bar will be displayed.
"""
...
@staticmethod
@abstractmethod
def pull(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocList':
"""Pull a DocList from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: A DocList
"""
...
@staticmethod
@abstractmethod
def pull_stream(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: An iterator of documents"""
...
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocList
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocLists in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocList names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocList object at the specified name
:param name: The name of the DocList to delete
:param missing_ok: If true, no error will be raised if the DocList does not exist.
:return: True if the DocList was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
docs: 'DocList',
name: str,
public: bool,
show_progress: bool,
branding: Optional[Dict],
) -> Dict:
"""Push this DocList to the specified name.
:param docs: The DocList to push
:param name: The name to push to
:param public: Whether the DocList should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocList
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
public: bool = True,
show_progress: bool = False,
branding: Optional[Dict] = None,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param public: Whether the DocList should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocList
"""
...
@staticmethod
@abstractmethod
def pull(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocList':
"""Pull a DocList from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: A DocList
"""
...
@staticmethod
@abstractmethod
def pull_stream(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: An iterator of documents"""
...
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmpretrain.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
from mmdet.utils.typing import Tuple
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='Display interval of the visualization in seconds; 0 blocks until a key press')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
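    # Run the test pipeline once on a dummy frame of the original size to
    # learn the padded batch input shape the model expects at inference time.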
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
_, data_sample = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
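    # Wrap an already-resized frame and its metadata into the input dict
    # consumed by model.test_step.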
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
data = {'inputs': frame_resize, 'data_sample': data_sample}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# register all modules in mmdet into the registries
register_all_modules()
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
from mmdet.utils.typing import Tuple
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='Display interval of the visualization in seconds; 0 blocks until a key press')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
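    # Run the test pipeline once on a dummy frame of the original size to
    # learn the padded batch input shape the model expects at inference time.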
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
_, data_sample = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
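    # Wrap an already-resized frame and its metadata into the input dict
    # consumed by model.test_step.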
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
data = {'inputs': frame_resize, 'data_sample': data_sample}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# register all modules in mmdet into the registries
register_all_modules()
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
pred_sample=result,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4,
block_dilations=[2, 4, 6, 8]),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00066667,
by_epoch=False,
begin=0,
end=1500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='RandomShift', prob=0.5, max_shift_px=32),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=8, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4,
block_dilations=[2, 4, 6, 8]),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00066667,
by_epoch=False,
begin=0,
end=1500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='RandomShift', prob=0.5, max_shift_px=32),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=8, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[320, 96, 32],
out_channels=[96, 96, 96]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[96, 96, 96],
out_channels=[96, 96, 96],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(
type='Resize',
img_scale=[(320, 320), (416, 416)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
type='RepeatDataset', # use RepeatDataset to speed up training
times=10,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=4000,
warmup_ratio=0.0001,
step=[24, 28])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=30)
evaluation = dict(interval=1, metric=['bbox'])
find_unused_parameters = True
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[320, 96, 32],
out_channels=[96, 96, 96]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[96, 96, 96],
out_channels=[96, 96, 96],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='PhotoMetricDistortion'),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(
type='Resize',
img_scale=[(320, 320), (416, 416)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
type='RepeatDataset', # use RepeatDataset to speed up training
times=10,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=4000,
warmup_ratio=0.0001,
step=[24, 28])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=30)
evaluation = dict(interval=1, metric=['bbox'])
find_unused_parameters = True
|
from typing import List, TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T, Document
def _reduce_doc_props(doc1: 'Document', doc2: 'Document'):
doc1_fields = set(doc1.non_empty_fields)
doc2_fields = set(doc2.non_empty_fields)
# update only fields that are set in doc2 and not set in doc1
fields = doc2_fields - doc1_fields
fields = fields - {'matches', 'chunks', 'id', 'parent_id'}
for field in fields:
setattr(doc1, field, getattr(doc2, field))
class ReduceMixin:
"""
A mixin that provides reducing logic for :class:`DocumentArray`
Reducing 2 or more DocumentArrays consists in merging all Documents into the same DocumentArray.
If a Document belongs to 2 or more DocumentArrays, it is added once and data attributes are merged with priority to
the Document belonging to the left-most DocumentArray. Matches and chunks are also reduced in the same way.
Reduction is applied to all levels of DocumentArrays, that is, from root Documents to all their chunk and match
children.
"""
def reduce(self: 'T', other: 'T') -> 'T':
"""
Reduces other and the current DocumentArray into one DocumentArray in-place. Changes are applied to the current
DocumentArray.
Reducing 2 DocumentArrays consists in adding Documents in the second DocumentArray to the first DocumentArray
if they do not exist. If a Document exists in both DocumentArrays, the data properties are merged with priority
to the first Document (that is, to the current DocumentArray's Document). The matches and chunks are also
reduced in the same way.
:param other: DocumentArray
:return: DocumentArray
"""
for doc in other:
if doc.id in self:
self._reduce_doc(self[doc.id], doc)
else:
self.append(doc)
return self
@staticmethod
def _reduce_doc(doc1: 'Document', doc2: 'Document'):
"""
Reduces doc1 and doc2 into one Document in-place. Changes are applied to doc1.
Reducing 2 Documents consists in setting data properties of the second Document to the first Document if they
are empty (that is, priority to the left-most Document) and reducing the matches and the chunks of both
documents.
Non-data properties are ignored.
        Reduction of matches and chunks relies on :meth:`DocumentArray.reduce`.
:param doc1: first Document
:param doc2: second Document
"""
_reduce_doc_props(doc1, doc2)
if len(doc2.matches) > 0:
doc1.matches.reduce(doc2.matches)
if len(doc2.chunks) > 0:
doc1.chunks.reduce(doc2.chunks)
def reduce_all(self: 'T', others: List['T']) -> 'T':
"""
Reduces a list of DocumentArrays and this DocumentArray into one DocumentArray. Changes are applied to this
DocumentArray in-place.
Reduction consists in reducing this DocumentArray with every DocumentArray in `others` sequentially using
        :meth:`DocumentArray.reduce`.
The resulting DocumentArray contains Documents of all DocumentArrays.
If a Document exists in many DocumentArrays, data properties are merged with priority to the left-most
DocumentArrays (that is, if a data attribute is set in a Document belonging to many DocumentArrays, the
attribute value of the left-most DocumentArray is kept).
Matches and chunks of a Document belonging to many DocumentArrays are also reduced in the same way.
Other non-data properties are ignored.
.. note::
- Matches are not kept in a sorted order when they are reduced. You might want to re-sort them in a later
step.
- The final result depends on the order of DocumentArrays when applying reduction.
:param others: List of DocumentArrays to be reduced
:return: the resulting DocumentArray
"""
for da in others:
self.reduce(da)
return self
|
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.typing import T, Document
def _reduce_doc_props(doc1: 'Document', doc2: 'Document'):
doc1_fields = set(doc1.non_empty_fields)
doc2_fields = set(doc2.non_empty_fields)
# update only fields that are set in doc2 and not set in doc1
fields = doc2_fields - doc1_fields
fields = fields - {'matches', 'chunks', 'id', 'parent_id'}
for field in fields:
setattr(doc1, field, getattr(doc2, field))
class ReduceMixin:
"""
A mixin that provides reducing logic for :class:`DocumentArray`
Reducing 2 or more DocumentArrays consists in merging all Documents into the same DocumentArray.
If a Document belongs to 2 or more DocumentArrays, it is added once and data attributes are merged with priority to
the Document belonging to the left-most DocumentArray. Matches and chunks are also reduced in the same way.
Reduction is applied to all levels of DocumentArrays, that is, from root Documents to all their chunk and match
children.
"""
def reduce(self: 'T', other: 'T') -> 'T':
"""
Reduces other and the current DocumentArray into one DocumentArray in-place. Changes are applied to the current
DocumentArray.
Reducing 2 DocumentArrays consists in adding Documents in the second DocumentArray to the first DocumentArray
if they do not exist. If a Document exists in both DocumentArrays, the data properties are merged with priority
to the first Document (that is, to the current DocumentArray's Document). The matches and chunks are also
reduced in the same way.
:param other: DocumentArray
:return: DocumentArray
"""
for doc in other:
if doc.id in self:
self._reduce_doc(self[doc.id], doc)
else:
self.append(doc)
return self
@staticmethod
def _reduce_doc(doc1: 'Document', doc2: 'Document'):
"""
Reduces doc1 and doc2 into one Document in-place. Changes are applied to doc1.
Reducing 2 Documents consists in setting data properties of the second Document to the first Document if they
are empty (that is, priority to the left-most Document) and reducing the matches and the chunks of both
documents.
Non-data properties are ignored.
        Reduction of matches and chunks relies on :meth:`DocumentArray.reduce`.
:param doc1: first Document
:param doc2: second Document
"""
_reduce_doc_props(doc1, doc2)
if len(doc2.matches) > 0:
doc1.matches.reduce(doc2.matches)
if len(doc2.chunks) > 0:
doc1.chunks.reduce(doc2.chunks)
def reduce_all(self: 'T', others: List['T']) -> 'T':
"""
Reduces a list of DocumentArrays and this DocumentArray into one DocumentArray. Changes are applied to this
DocumentArray in-place.
Reduction consists in reducing this DocumentArray with every DocumentArray in `others` sequentially using
        :meth:`DocumentArray.reduce`.
The resulting DocumentArray contains Documents of all DocumentArrays.
If a Document exists in many DocumentArrays, data properties are merged with priority to the left-most
DocumentArrays (that is, if a data attribute is set in a Document belonging to many DocumentArrays, the
attribute value of the left-most DocumentArray is kept).
Matches and chunks of a Document belonging to many DocumentArrays are also reduced in the same way.
Other non-data properties are ignored.
.. note::
- Matches are not kept in a sorted order when they are reduced. You might want to re-sort them in a later
step.
- The final result depends on the order of DocumentArrays when applying reduction.
:param others: List of DocumentArrays to be reduced
:return: the resulting DocumentArray
"""
for da in others:
self.reduce(da)
return self
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
# MMEngine support the following two ways, users can choose
# according to convenience
# train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
# MMEngine support the following two ways, users can choose
# according to convenience
# train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
import json
from jina.orchestrate.flow.base import Flow
from jina.orchestrate.deployments import Deployment
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_kubernetes_yaml(
output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
)
else:
raise NotImplementedError(
f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes'
)
def export_docker_compose(args):
"""Export to Docker compose yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_docker_compose_yaml(
output_path=args.outpath, network_name=args.network_name
)
else:
raise NotImplementedError(
f'Object of class {obj.__class__.__name__} cannot be exported to Docker Compose'
)
def export_flowchart(args):
"""Export to flowchart file
:param args: args from CLI
"""
Flow.load_config(args.config_path).plot(
args.outpath, vertical_layout=args.vertical_layout
)
def export_schema(args):
"""Export to JSON Schemas
:param args: args from CLI
"""
from jina import __version__
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf-8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf-8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf-8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
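if __name__ == "__main__":
    # A minimal sketch of driving the helpers above outside the CLI; the Flow YAML
    # path and the output directory are hypothetical placeholders.
    from argparse import Namespace

    export_kubernetes(
        Namespace(config_path='flow.yml', outpath='./k8s_out', k8s_namespace='default')
    )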
|
import json
from jina.orchestrate.flow.base import Flow
from jina.orchestrate.deployments import Deployment
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_kubernetes_yaml(
output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes')
def export_docker_compose(args):
"""Export to Docker compose yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_docker_compose_yaml(
output_path=args.outpath, network_name=args.network_name
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Docker Compose')
def export_flowchart(args):
"""Export to flowchart file
:param args: args from CLI
"""
Flow.load_config(args.config_path).plot(
args.outpath, vertical_layout=args.vertical_layout
)
def export_schema(args):
"""Export to JSON Schemas
:param args: args from CLI
"""
from jina import __version__
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf-8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf-8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf-8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc, DocList
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.typing import NdArray
class SchemaDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10]
@pytest.fixture
def docs():
docs = DocList[SchemaDoc](
[
SchemaDoc(text=f'hello {i}', price=i, tensor=np.array([i] * 10))
for i in range(9)
]
)
docs.append(SchemaDoc(text='good bye', price=100, tensor=np.array([100.0] * 10)))
return docs
def test_indexing(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
assert doc_index.num_docs() == 0
doc_index.index(docs)
assert doc_index.num_docs() == 10
@pytest.fixture
def doc_index(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
doc_index.index(docs)
return doc_index
def test_del_item(docs, doc_index):
to_remove = [docs[0].id, docs[1].id]
doc_index._del_items(to_remove)
assert doc_index.num_docs() == 8
def test_del(docs, doc_index):
del doc_index[docs[0].id]
assert doc_index.num_docs() == 9
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = MyDoc(text='query', price=0, tensor=np.ones(10))
else:
query = np.ones(10)
docs, scores = doc_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert doc_index.num_docs() == 10
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find_batched(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = DocList[MyDoc](
[
MyDoc(text='query 0', price=0, tensor=np.zeros(10)),
MyDoc(text='query 1', price=1, tensor=np.ones(10)),
]
)
else:
query = np.ones((2, 10))
docs, scores = doc_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 2
for result in docs:
assert len(result) == 5
assert doc_index.num_docs() == 10
def test_concatenated_queries(doc_index):
query = SchemaDoc(text='query', price=0, tensor=np.ones(10))
q = (
doc_index.build_query()
.find(query=query, search_field='tensor', limit=5)
.filter(filter_query={'price': {'$neq': 5}})
.build()
)
docs, scores = doc_index.execute_query(q)
assert len(docs) == 4
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc, DocList
from docarray.index.backends.in_memory import InMemoryDocIndex
from docarray.typing import NdArray
class SchemaDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10]
@pytest.fixture
def docs():
docs = DocList[SchemaDoc](
[
SchemaDoc(text=f'hello {i}', price=i, tensor=np.array([i] * 10))
for i in range(9)
]
)
docs.append(SchemaDoc(text='good bye', price=100, tensor=np.array([100.0] * 10)))
return docs
def test_indexing(docs):
doc_index = InMemoryDocIndex[SchemaDoc]()
assert doc_index.num_docs() == 0
doc_index.index(docs)
assert doc_index.num_docs() == 10
@pytest.fixture
def doc_index(docs):
doc_index = InMemoryDocIndex[SchemaDoc]()
doc_index.index(docs)
return doc_index
def test_del_item(docs, doc_index):
to_remove = [docs[0].id, docs[1].id]
doc_index._del_items(to_remove)
assert doc_index.num_docs() == 8
def test_del(docs, doc_index):
del doc_index[docs[0].id]
assert doc_index.num_docs() == 9
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = MyDoc(text='query', price=0, tensor=np.ones(10))
else:
query = np.ones(10)
docs, scores = doc_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert doc_index.num_docs() == 10
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find_batched(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = DocList[MyDoc](
[
MyDoc(text='query 0', price=0, tensor=np.zeros(10)),
MyDoc(text='query 1', price=1, tensor=np.ones(10)),
]
)
else:
query = np.ones((2, 10))
docs, scores = doc_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 2
for result in docs:
assert len(result) == 5
assert doc_index.num_docs() == 10
def test_concatenated_queries(doc_index):
query = SchemaDoc(text='query', price=0, tensor=np.ones(10))
q = (
doc_index.build_query()
.find(query=query, search_field='tensor', limit=5)
.filter(filter_query={'price': {'$neq': 5}})
.build()
)
docs, scores = doc_index.execute_query(q)
assert len(docs) == 4
|
from typing import TYPE_CHECKING, TypeVar
import numpy as np
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T', bound='PointCloud3DUrl')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(point_cloud_url=str(self))
def load(self: T, samples: int, multiple_geometries: bool = False) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray containing point cloud
information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDocument):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:return: np.ndarray representing the point cloud
"""
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(force='scene')
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh')
point_cloud = np.array(mesh.sample(samples))
return point_cloud
|
from typing import TYPE_CHECKING, TypeVar
import numpy as np
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T', bound='PointCloud3DUrl')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(point_cloud_url=str(self))
def load(self: T, samples: int, multiple_geometries: bool = False) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray containing point cloud
information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import Document
from docarray.typing import PointCloud3DUrl
class MyDoc(Document):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:return: np.ndarray representing the point cloud
"""
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(force='scene')
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh')
point_cloud = np.array(mesh.sample(samples))
return point_cloud
|
# Copyright (c) OpenMMLab. All rights reserved.
from .history_buffer import HistoryBuffer
from .log_processor import LogProcessor
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = [
'HistoryBuffer', 'MessageHub', 'MMLogger', 'print_log', 'LogProcessor'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .history_buffer import HistoryBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = ['HistoryBuffer', 'MessageHub', 'MMLogger', 'print_log']
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
class ReconstructionLoss(nn.Module):
"""
Reconstruction Loss module for Sparse AutoEncoder.
This module computes the reconstruction loss according to the formula:
L_recon = L(k) + L(4k)/8 + β*L_aux
where:
- L(k) = ||f(x) - f(dx)_k||₂²
- L(4k) = ||f(x) - f(dx)_4k||₂²
- L_aux = ||e - ê||₂², e = f(x) - f(dx), ê = W_dec*z
"""
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
super().__init__()
self.model = model
self.beta = beta
def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Forward pass of the Reconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
The total reconstruction loss as a scalar tensor
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the reconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
The total reconstruction loss as a scalar tensor
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
f_x = features["sentence_embedding_backbone"]
x_hat_k = features["decoded_embedding_k"]
x_hat_4k = features["decoded_embedding_4k"]
e = features["error"]
e_hat = features["error_hat"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(f_x, x_hat_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(f_x, x_hat_4k)
# L_aux = ||e - ê||₂²
L_aux = F.mse_loss(e, e_hat)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
}
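if __name__ == "__main__":
    # A self-contained sketch (random tensors only, no SparseEncoder model) of the
    # loss formula implemented above: L_recon = L(k) + L(4k)/8 + beta * L_aux.
    f_x = torch.randn(4, 768)        # backbone embedding f(x)
    x_hat_k = torch.randn(4, 768)    # reconstruction from the top-k sparse code
    x_hat_4k = torch.randn(4, 768)   # reconstruction from the top-4k sparse code
    e = f_x - x_hat_k                # reconstruction error
    e_hat = torch.randn(4, 768)      # predicted error, i.e. W_dec * z
    beta = 1.0
    loss = F.mse_loss(f_x, x_hat_k) + F.mse_loss(f_x, x_hat_4k) / 8 + beta * F.mse_loss(e, e_hat)
    print(float(loss))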
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
class ReconstructionLoss(nn.Module):
"""
Reconstruction Loss module for Sparse AutoEncoder.
This module computes the reconstruction loss according to the formula:
L_recon = L(k) + L(4k)/8 + β*L_aux
where:
- L(k) = ||f(x) - f(dx)_k||₂²
- L(4k) = ||f(x) - f(dx)_4k||₂²
- L_aux = ||e - ê||₂², e = f(x) - f(dx), ê = W_dec*z
"""
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
super().__init__()
self.model = model
self.beta = beta
def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Forward pass of the Reconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
The total reconstruction loss as a scalar tensor
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the reconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
The total reconstruction loss as a scalar tensor
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding"]
x_hat = features["decoded_embedding"]
x_hat_4k = features["decoded_embedding_4k"]
e = features["error"]
e_hat = features["error_hat"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, x_hat)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, x_hat_4k)
# L_aux = ||e - ê||₂²
L_aux = F.mse_loss(e, e_hat)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
}
|
import warnings
from typing import Any, Callable, List, Optional, Sequence, Union
import torch
from torch import nn
from torchvision.prototype.transforms import Transform
class Compose(Transform):
def __init__(self, transforms: Sequence[Callable]) -> None:
super().__init__()
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
for transform in self.transforms:
sample = transform(sample)
return sample
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomApply(Transform):
def __init__(self, transforms: Union[Sequence[Callable], nn.ModuleList], p: float = 0.5) -> None:
super().__init__()
if not isinstance(transforms, (Sequence, nn.ModuleList)):
raise TypeError("Argument transforms should be a sequence of callables or a `nn.ModuleList`")
self.transforms = transforms
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
self.p = p
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
if torch.rand(1) >= self.p:
return sample
for transform in self.transforms:
sample = transform(sample)
return sample
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomChoice(Transform):
def __init__(
self,
transforms: Sequence[Callable],
probabilities: Optional[List[float]] = None,
p: Optional[List[float]] = None,
) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
if p is not None:
warnings.warn(
"Argument p is deprecated and will be removed in a future release. "
"Please use probabilities argument instead."
)
probabilities = p
if probabilities is None:
probabilities = [1] * len(transforms)
elif len(probabilities) != len(transforms):
raise ValueError(
f"The number of probabilities doesn't match the number of transforms: "
f"{len(probabilities)} != {len(transforms)}"
)
super().__init__()
self.transforms = transforms
total = sum(probabilities)
self.probabilities = [prob / total for prob in probabilities]
def forward(self, *inputs: Any) -> Any:
idx = int(torch.multinomial(torch.tensor(self.probabilities), 1))
transform = self.transforms[idx]
return transform(*inputs)
class RandomOrder(Transform):
def __init__(self, transforms: Sequence[Callable]) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
super().__init__()
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
for idx in torch.randperm(len(self.transforms)):
transform = self.transforms[idx]
sample = transform(sample)
return sample
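if __name__ == "__main__":
    # A small sketch of the selection logic used by RandomChoice above:
    # probabilities are normalised to sum to 1 and one transform index is drawn.
    probs = [2.0, 1.0, 1.0]
    total = sum(probs)
    normalised = [p / total for p in probs]
    idx = int(torch.multinomial(torch.tensor(normalised), 1))
    print(f"picked transform #{idx} with probabilities {normalised}")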
|
import warnings
from typing import Any, Callable, List, Optional, Sequence
import torch
from torchvision.prototype.transforms import Transform
class Compose(Transform):
def __init__(self, transforms: Sequence[Callable]) -> None:
super().__init__()
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
for transform in self.transforms:
sample = transform(sample)
return sample
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomApply(Compose):
def __init__(self, transforms: Sequence[Callable], p: float = 0.5) -> None:
super().__init__(transforms)
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
self.p = p
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
if torch.rand(1) >= self.p:
return sample
return super().forward(sample)
class RandomChoice(Transform):
def __init__(
self,
transforms: Sequence[Callable],
probabilities: Optional[List[float]] = None,
p: Optional[List[float]] = None,
) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
if p is not None:
warnings.warn(
"Argument p is deprecated and will be removed in a future release. "
"Please use probabilities argument instead."
)
probabilities = p
if probabilities is None:
probabilities = [1] * len(transforms)
elif len(probabilities) != len(transforms):
raise ValueError(
f"The number of probabilities doesn't match the number of transforms: "
f"{len(probabilities)} != {len(transforms)}"
)
super().__init__()
self.transforms = transforms
total = sum(probabilities)
self.probabilities = [prob / total for prob in probabilities]
def forward(self, *inputs: Any) -> Any:
idx = int(torch.multinomial(torch.tensor(self.probabilities), 1))
transform = self.transforms[idx]
return transform(*inputs)
class RandomOrder(Transform):
def __init__(self, transforms: Sequence[Callable]) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
super().__init__()
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
for idx in torch.randperm(len(self.transforms)):
transform = self.transforms[idx]
sample = transform(sample)
return sample
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
|
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_simple_use_abs_import_shall_fail():
with pytest.raises(ModuleNotFoundError):
from .dummyhub_abs import DummyHubExecutorAbs
DummyHubExecutorAbs()
with pytest.raises(RuntimeFailToStart):
with Flow().add(uses='DummyHubExecutorAbs'):
pass
def test_simple_use_relative_import():
from .dummyhub import DummyHubExecutor
DummyHubExecutor()
with Flow().add(uses='DummyHubExecutor'):
pass
def test_use_from_local_dir_exe_level():
with BaseExecutor.load_config('dummyhub/config.yml'):
pass
def test_use_from_local_dir_deployment_level():
a = set_deployment_parser().parse_args(['--uses', 'dummyhub/config.yml'])
with Deployment(a):
pass
def test_use_from_local_dir_flow_level():
with Flow().add(uses='dummyhub/config.yml'):
pass
@pytest.fixture
def local_hub_executor(tmpdir):
from hubble.executor import HubExecutor, helper, hubapi
pkg_path = Path(__file__).parent / 'dummyhub'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
hubapi.install_local(
Path(tmpdir) / 'dummy_test.zip', HubExecutor(uuid='hello', tag='v0')
)
def test_use_from_local_hub_deployment_level(mocker, monkeypatch, local_hub_executor):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
a = set_deployment_parser().parse_args(['--uses', 'jinahub://hello'])
with Deployment(a):
pass
def test_use_from_local_hub_flow_level(mocker, monkeypatch, local_hub_executor):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
with Flow().add(uses='jinahub://hello', install_requirements=True):
pass
|
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_simple_use_abs_import_shall_fail():
with pytest.raises(ModuleNotFoundError):
from .dummyhub_abs import DummyHubExecutorAbs
DummyHubExecutorAbs()
with pytest.raises(RuntimeFailToStart):
with Flow().add(uses='DummyHubExecutorAbs'):
pass
def test_simple_use_relative_import():
from .dummyhub import DummyHubExecutor
DummyHubExecutor()
with Flow().add(uses='DummyHubExecutor'):
pass
def test_use_from_local_dir_exe_level():
with BaseExecutor.load_config('dummyhub/config.yml'):
pass
def test_use_from_local_dir_deployment_level():
a = set_deployment_parser().parse_args(['--uses', 'dummyhub/config.yml'])
with Deployment(a):
pass
def test_use_from_local_dir_flow_level():
with Flow().add(uses='dummyhub/config.yml'):
pass
@pytest.fixture
def local_hub_executor(tmpdir):
from jina.hubble import HubExecutor, helper, hubapi
pkg_path = Path(__file__).parent / 'dummyhub'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
hubapi.install_local(
Path(tmpdir) / 'dummy_test.zip', HubExecutor(uuid='hello', tag='v0')
)
def test_use_from_local_hub_deployment_level(mocker, monkeypatch, local_hub_executor):
from jina.hubble.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
a = set_deployment_parser().parse_args(['--uses', 'jinahub://hello'])
with Deployment(a):
pass
def test_use_from_local_hub_flow_level(mocker, monkeypatch, local_hub_executor):
from jina.hubble.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
with Flow().add(uses='jinahub://hello', install_requirements=True):
pass
|
"""Image prompt template for a multimodal model."""
from typing import Any
from pydantic import Field
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
PromptTemplateFormat,
)
from langchain_core.runnables import run_in_executor
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
"""Image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
template_format: PromptTemplateFormat = "f-string"
"""The format of the prompt template.
Options are: 'f-string', 'mustache', 'jinja2'."""
def __init__(self, **kwargs: Any) -> None:
"""Create an image prompt template."""
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"}
if overlap:
msg = (
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
raise ValueError(msg)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=self.format(**kwargs))
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=await self.aformat(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the url is not provided.
ValueError: If the url is not a string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = DEFAULT_FORMATTER_MAPPING[self.template_format](
v, **kwargs
)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
if kwargs.get("path") or formatted.get("path"):
msg = (
"Loading images from 'path' has been removed as of 0.3.15 for security "
"reasons. Please specify images by 'url'."
)
raise ValueError(msg)
detail = kwargs.get("detail") or formatted.get("detail")
if not url:
msg = "Must provide url."
raise ValueError(msg)
if not isinstance(url, str):
msg = "url must be a string."
raise ValueError(msg) # noqa: TRY004
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail # type: ignore[typeddict-item]
return output
async def aformat(self, **kwargs: Any) -> ImageURL:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the path or url is not a string.
"""
return await run_in_executor(None, self.format, **kwargs)
def pretty_repr(self, html: bool = False) -> str:
"""Return a pretty representation of the prompt.
Args:
html: Whether to return an html formatted string.
Returns:
A pretty representation of the prompt.
"""
raise NotImplementedError
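if __name__ == "__main__":
    # A minimal sketch of the template defined above (the URL is a placeholder):
    # the f-string substitution fills "{image_url}" before the ImageURL dict is built.
    prompt = ImagePromptTemplate(
        input_variables=["image_url"],
        template={"url": "{image_url}", "detail": "low"},
    )
    print(prompt.format(image_url="https://example.com/cat.png"))
    # -> {'url': 'https://example.com/cat.png', 'detail': 'low'}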
|
"""Image prompt template for a multimodal model."""
from typing import Any
from pydantic import Field
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
PromptTemplateFormat,
)
from langchain_core.runnables import run_in_executor
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
"""Image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
template_format: PromptTemplateFormat = "f-string"
"""The format of the prompt template.
Options are: 'f-string', 'mustache', 'jinja2'."""
def __init__(self, **kwargs: Any) -> None:
"""Create an image prompt template."""
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"}
if overlap:
msg = (
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
raise ValueError(msg)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=self.format(**kwargs))
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=await self.aformat(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the url is not provided.
ValueError: If the url is not a string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = DEFAULT_FORMATTER_MAPPING[self.template_format](
v, **kwargs
)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
if kwargs.get("path") or formatted.get("path"):
msg = (
"Loading images from 'path' has been removed as of 0.3.15 for security "
"reasons. Please specify images by 'url'."
)
raise ValueError(msg)
detail = kwargs.get("detail") or formatted.get("detail")
if not url:
msg = "Must provide url."
raise ValueError(msg)
elif not isinstance(url, str):
msg = "url must be a string."
raise ValueError(msg)
else:
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail # type: ignore[typeddict-item]
return output
async def aformat(self, **kwargs: Any) -> ImageURL:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the path or url is not a string.
"""
return await run_in_executor(None, self.format, **kwargs)
def pretty_repr(self, html: bool = False) -> str:
"""Return a pretty representation of the prompt.
Args:
html: Whether to return an html formatted string.
Returns:
A pretty representation of the prompt.
"""
raise NotImplementedError
|
import http.client
import json
from typing import Optional
def list_packages(*, contains: Optional[str] = None):
conn = http.client.HTTPSConnection("api.github.com")
headers = {
"Accept": "application/vnd.github+json",
"X-GitHub-Api-Version": "2022-11-28",
"User-Agent": "langchain-cli",
}
conn.request(
"GET",
"/repos/langchain-ai/langchain/contents/templates",
headers=headers,
)
res = conn.getresponse()
res_str = res.read()
data = json.loads(res_str)
package_names = [
p["name"] for p in data if p["type"] == "dir" and p["name"] != "docs"
]
return [p for p in package_names if contains in p] if contains else package_names
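if __name__ == "__main__":
    # A minimal usage sketch (requires network access to the GitHub API): list all
    # LangChain template packages whose name contains "rag".
    print(list_packages(contains="rag"))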
|
import http.client
import json
from typing import Optional
def list_packages(*, contains: Optional[str] = None):
conn = http.client.HTTPSConnection("api.github.com")
headers = {
"Accept": "application/vnd.github+json",
"X-GitHub-Api-Version": "2022-11-28",
"User-Agent": "langchain-cli",
}
conn.request(
"GET", "/repos/langchain-ai/langchain/contents/templates", headers=headers
)
res = conn.getresponse()
res_str = res.read()
data = json.loads(res_str)
package_names = [
p["name"] for p in data if p["type"] == "dir" and p["name"] != "docs"
]
return [p for p in package_names if contains in p] if contains else package_names
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
_base_ = [
'../common/ms_3x_coco-instance.py',
'../_base_/models/cascade-mask-rcnn_r50_fpn.py'
]
|
_base_ = [
'../common/mstrain_3x_coco_instance.py',
'../_base_/models/cascade_mask_rcnn_r50_fpn.py'
]
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.ports[0]
self.grpc_port = self.ports[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.runtime_args.port[0]
self.grpc_port = self.runtime_args.port[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
|
from jina.clients.base.grpc import GRPCBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
HealthCheckMixin,
PostMixin,
)
class GRPCClient(GRPCBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='grpc', asyncio=False, host='grpc://my.awesome.flow:1234'
) # returns GRPCClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncGRPCClient(GRPCBaseClient, AsyncPostMixin, AsyncHealthCheckMixin):
"""
Asynchronous client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
Unlike :class:`GRPCClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
simply calling it will not schedule it to be executed.
To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
``asyncio.create_task()``.
:class:`AsyncGRPCClient` can be very useful in
integration settings, where Jina/Flow/Client is NOT the main logic, but rather serves as a part of another program.
In this case, users often do **NOT** want to let Jina control the ``asyncio.eventloop``. On the contrary, :class:`Client`
controls and wraps the event loop internally, making the Client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='grpc', asyncio=True, host='grpc://my.awesome.flow:1234'
) # returns AsyncGRPCClient instance
async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
from jina.clients.base.grpc import GRPCBaseClient
from jina.clients.mixin import AsyncPostMixin, HealthCheckMixin, PostMixin
class GRPCClient(GRPCBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='grpc', asyncio=False, host='grpc://my.awesome.flow:1234'
) # returns GRPCClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncGRPCClient(GRPCBaseClient, AsyncPostMixin, HealthCheckMixin):
"""
Asynchronous client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
Unlike :class:`GRPCClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
simply calling it will not schedule it to be executed.
To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
``asyncio.create_task()``.
:class:`AsyncGRPCClient` can be very useful in
integration settings, where Jina/Flow/Client is NOT the main logic, but rather serves as a part of another program.
In this case, users often do **NOT** want to let Jina control the ``asyncio.eventloop``. On the contrary, :class:`Client`
controls and wraps the event loop internally, making the Client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='grpc', asyncio=True, host='grpc://my.awesome.flow:1234'
) # returns AsyncGRPCClient instance
async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
from torchaudio._internal.module_utils import dropping_support, dropping_class_support
import inspect
_CTC_DECODERS = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
_CUDA_CTC_DECODERS = [
"CUCTCDecoder",
"CUCTCHypothesis",
"cuda_ctc_decoder",
]
def __getattr__(name: str):
if name in _CTC_DECODERS:
try:
from . import _ctc_decoder
except Exception as err:
raise RuntimeError(
"CTC Decoder suit requires flashlight-text package and optionally KenLM. Please install them."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
elif name in _CUDA_CTC_DECODERS:
try:
from . import _cuda_ctc_decoder
except AttributeError as err:
raise RuntimeError(
"To use CUCTC decoder, please set BUILD_CUDA_CTC_DECODER=1 when building from source."
) from err
orig_item = getattr(_cuda_ctc_decoder, name)
if inspect.isclass(orig_item):
item = dropping_class_support(orig_item)
else:
item = dropping_support(orig_item)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__)
__all__ = _CTC_DECODERS + _CUDA_CTC_DECODERS
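if __name__ == "__main__":
    # A minimal sketch of the PEP 562 lazy loading implemented above: nothing is
    # imported until an attribute is first requested, and a RuntimeError with an
    # installation hint is raised when the optional backend is missing.
    print(__dir__())  # lists the available decoder names without importing any backend
    try:
        __getattr__("ctc_decoder")  # triggers the lazy import of `_ctc_decoder`
    except RuntimeError as err:
        print(f"optional dependency missing: {err}")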
|
from torchaudio._internal.module_utils import dropping_support
_CTC_DECODERS = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
_CUDA_CTC_DECODERS = [
"CUCTCDecoder",
"CUCTCHypothesis",
"cuda_ctc_decoder",
]
def __getattr__(name: str):
if name in _CTC_DECODERS:
try:
from . import _ctc_decoder
except Exception as err:
raise RuntimeError(
"CTC Decoder suit requires flashlight-text package and optionally KenLM. Please install them."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
elif name in _CUDA_CTC_DECODERS:
try:
from . import _cuda_ctc_decoder
except AttributeError as err:
raise RuntimeError(
"To use CUCTC decoder, please set BUILD_CUDA_CTC_DECODER=1 when building from source."
) from err
item = dropping_support(getattr(_cuda_ctc_decoder, name))
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__)
__all__ = _CTC_DECODERS + _CUDA_CTC_DECODERS
|
# Copyright (c) OpenMMLab. All rights reserved.
from .image import (color_val_matplotlib, imshow_det_bboxes,
imshow_gt_det_bboxes)
__all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib']
|
from .image import (color_val_matplotlib, imshow_det_bboxes,
imshow_gt_det_bboxes)
__all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib']
|
_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
import os
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as
from docarray import BaseDocument
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
VideoNdArray,
VideoTorchTensor,
)
@pytest.mark.parametrize(
'tensor,cls_video_tensor,cls_tensor',
[
(torch.zeros(1, 224, 224, 3), VideoTorchTensor, torch.Tensor),
(np.zeros((1, 224, 224, 3)), VideoNdArray, np.ndarray),
],
)
def test_set_video_tensor(tensor, cls_video_tensor, cls_tensor):
class MyVideoDoc(BaseDocument):
tensor: cls_video_tensor
doc = MyVideoDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_video_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, np.zeros((1, 224, 224, 3))),
(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, np.zeros((1, 224, 224, 3))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, torch.zeros(224, 3)),
(VideoTorchTensor, torch.zeros(1, 224, 224, 100)),
(VideoNdArray, 'hello'),
(VideoTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(
VideoTorchTensor,
torch.zeros(1, 224, 224, 3),
VideoTorchTensor._proto_type_name,
),
(VideoNdArray, np.zeros((1, 224, 224, 3)), VideoNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_file(video_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.randn(100, 1, 1024).to(torch.float32)),
parse_obj_as(AudioNdArray, np.random.randn(100, 1, 1024).astype('float32')),
],
)
def test_save_video_tensor_to_file_including_audio(video_tensor, audio_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file, audio_tensor=audio_tensor)
assert os.path.isfile(tmp_file)
|
import os
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as
from docarray import BaseDocument
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
VideoNdArray,
VideoTorchTensor,
)
@pytest.mark.parametrize(
'tensor,cls_video_tensor,cls_tensor',
[
(torch.zeros(1, 224, 224, 3), VideoTorchTensor, torch.Tensor),
(np.zeros((1, 224, 224, 3)), VideoNdArray, np.ndarray),
],
)
def test_set_video_tensor(tensor, cls_video_tensor, cls_tensor):
class MyVideoDoc(BaseDocument):
tensor: cls_video_tensor
doc = MyVideoDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_video_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, np.zeros((1, 224, 224, 3))),
(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, np.zeros((1, 224, 224, 3))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, torch.zeros(224, 3)),
(VideoTorchTensor, torch.zeros(1, 224, 224, 100)),
(VideoNdArray, 'hello'),
(VideoTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(
VideoTorchTensor,
torch.zeros(1, 224, 224, 3),
VideoTorchTensor._PROTO_FIELD_NAME,
),
(VideoNdArray, np.zeros((1, 224, 224, 3)), VideoNdArray._PROTO_FIELD_NAME),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert str(proto).startswith(proto_key)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_file(video_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.randn(100, 1, 1024).to(torch.float32)),
parse_obj_as(AudioNdArray, np.random.randn(100, 1, 1024).astype('float32')),
],
)
def test_save_video_tensor_to_file_including_audio(video_tensor, audio_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file, audio_tensor=audio_tensor)
assert os.path.isfile(tmp_file)
|
import copy
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
extract_on_the_fly (`bool`, defaults to `False`):
If `True`, extract compressed files while they are being read.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
            A description to be displayed alongside the progress bar while downloading the files.
disable_tqdm (`bool`, defaults to `False`):
Whether to disable the individual files download progress bar
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
extract_on_the_fly: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
storage_options: dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
disable_tqdm: bool = False
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
elif getattr(self.storage_options["hf"], "token", None) is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
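# Hedged usage sketch (path and token values are illustrative, and the helper is never
# called on import): copy() deep-copies every field, and assigning `token` afterwards
# is mirrored into storage_options["hf"] by the __setattr__ hook above.
def _example_usage() -> None:
    base_config = DownloadConfig(cache_dir="/tmp/datasets_cache", num_proc=4, max_retries=3)
    per_call_config = base_config.copy()
    per_call_config.token = "hf_xxx"  # hypothetical token
    assert per_call_config.storage_options["hf"]["token"] == "hf_xxx"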
|
import copy
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
extract_on_the_fly (`bool`, defaults to `False`):
If `True`, extract compressed files while they are being read.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
            A description to be displayed alongside the progress bar while downloading the files.
disable_tqdm (`bool`, defaults to `False`):
Whether to disable the individual files download progress bar
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
extract_on_the_fly: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
storage_options: dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
disable_tqdm: bool = False
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
elif getattr(self.storage_options["hf"], "token", None) is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
|
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
@dataclass
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "labels", "score", "scores"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# TODO:
# if tuple(column_names) not in self._warned_columns:
# self.maybe_warn_about_column_order(column_names)
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
batch[column_name] = [row[column_name] for row in features]
return batch
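# Hedged usage sketch (column names and scores are illustrative): rows with a "score"
# column collate into a "label" tensor, while the remaining text columns are passed
# through as plain lists because tokenization is deferred to the model in this variant.
def _example_usage() -> None:
    collator = CrossEncoderDataCollator(tokenize_fn=lambda texts: texts)  # tokenize_fn unused in this path
    rows = [
        {"query": "capital of France?", "passage": "Paris is the capital of France.", "score": 1.0},
        {"query": "capital of France?", "passage": "Berlin is in Germany.", "score": 0.0},
    ]
    batch = collator(rows)
    assert batch["label"].tolist() == [1.0, 0.0]
    assert batch["query"] == ["capital of France?", "capital of France?"]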
|
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
@dataclass
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "labels", "score", "scores"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# TODO:
# if tuple(column_names) not in self._warned_columns:
# self.maybe_warn_about_column_order(column_names)
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
# tokenized = self.tokenize_fn([row[column_name] for row in features])
# for key, value in tokenized.items():
# batch[f"{column_name}_{key}"] = value
batch[column_name] = [row[column_name] for row in features]
return batch
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.prototype.datapoints import Image, OneHotLabel
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "semeion"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(i) for i in range(10)])
@register_dataset(NAME)
class SEMEION(Dataset):
"""Semeion dataset
homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
)
return [data]
def _prepare_sample(self, data: Tuple[str, ...]) -> Dict[str, Any]:
image_data, label_data = data[:256], data[256:-1]
return dict(
image=Image(torch.tensor([float(pixel) for pixel in image_data], dtype=torch.float).reshape(16, 16)),
label=OneHotLabel([int(label) for label in label_data], categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVParser(dp, delimiter=" ")
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 1_593
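# Hedged sketch of what _prepare_sample receives for one row: each line of semeion.data
# carries 256 pixel values followed by a 10-way one-hot digit label, and the final
# element (presumably an empty field from a trailing delimiter) is dropped by the
# [256:-1] slice. The values below are illustrative, not real dataset content.
def _example_row() -> None:
    row = tuple(["1.0"] * 256 + ["0", "0", "0", "1", "0", "0", "0", "0", "0", "0"] + [""])
    image_values, label_values = row[:256], row[256:-1]
    image = torch.tensor([float(v) for v in image_values], dtype=torch.float).reshape(16, 16)
    label = [int(v) for v in label_values]  # one-hot over digit classes 0-9
    assert image.shape == (16, 16) and sum(label) == 1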
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Image, OneHotLabel
from .._api import register_dataset, register_info
NAME = "semeion"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(i) for i in range(10)])
@register_dataset(NAME)
class SEMEION(Dataset):
"""Semeion dataset
homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
)
return [data]
def _prepare_sample(self, data: Tuple[str, ...]) -> Dict[str, Any]:
image_data, label_data = data[:256], data[256:-1]
return dict(
image=Image(torch.tensor([float(pixel) for pixel in image_data], dtype=torch.float).reshape(16, 16)),
label=OneHotLabel([int(label) for label in label_data], categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVParser(dp, delimiter=" ")
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 1_593
|
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
class MyCustomAsyncHandler(AsyncCallbackHandler):
@override
async def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
@override
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
**kwargs: Any,
) -> None:
await asyncio.sleep(0)
@pytest.mark.benchmark
async def test_async_callbacks_in_sync(benchmark: BenchmarkFixture) -> None:
infinite_cycle = cycle([AIMessage(content=" ".join(["hello", "goodbye"] * 500))])
model = GenericFakeChatModel(messages=infinite_cycle)
@benchmark
def sync_callbacks() -> None:
for _ in range(5):
for _ in model.stream("meow", {"callbacks": [MyCustomAsyncHandler()]}):
pass
|
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
class MyCustomAsyncHandler(AsyncCallbackHandler):
@override
async def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
@override
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
**kwargs: Any,
) -> None:
await asyncio.sleep(0)
@pytest.mark.benchmark
async def test_async_callbacks(benchmark: BenchmarkFixture) -> None:
infinite_cycle = cycle([AIMessage(content=" ".join(["hello", "goodbye"] * 1000))])
model = GenericFakeChatModel(messages=infinite_cycle)
@benchmark
def async_callbacks() -> None:
for _ in range(10):
for _ in model.stream("meow", {"callbacks": [MyCustomAsyncHandler()]}):
pass
|
__version__ = '0.13.3'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.2'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from docarray.array.mixins.attribute import GetAttributeArrayMixin
from docarray.array.mixins.proto import ProtoArrayMixin
__all__ = ['ProtoArrayMixin', 'GetAttributeArrayMixin']
|
from docarray.array.mixins.proto import ProtoArrayMixin
__all__ = ['ProtoArrayMixin']
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils.typing import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class YOLACT(SingleStageInstanceSegmentor):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
mask_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.data_elements.bbox import bbox2result
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = MODELS.build(segm_head)
self.mask_head = MODELS.build(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
feat = self.extract_feat(img)
bbox_outs = self.bbox_head(feat)
prototypes = self.mask_head.forward_dummy(feat[0])
return (bbox_outs, prototypes)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
|
"""Init params."""
from llama_index.finetuning.cross_encoders.cross_encoder import (
CrossEncoderFinetuneEngine,
)
__all__ = ["CrossEncoderFinetuneEngine"]
|
"""Init params."""
from llama_index.finetuning.cross_encoders.cross_encoder import (
CrossEncoderFinetuneEngine,
)
__all__ = ["CrossEncoderFinetuneEngine"]
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_conditional_detr import *
from .feature_extraction_conditional_detr import *
from .image_processing_conditional_detr import *
from .image_processing_conditional_detr_fast import *
from .modeling_conditional_detr import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
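# Hedged usage note: with the _LazyModule replacement above, importing a single symbol
# only triggers the submodule that defines it, e.g.
#
#     from transformers.models.conditional_detr import ConditionalDetrConfig
#     config = ConditionalDetrConfig()  # loads configuration_conditional_detr only
#
# while the heavier modeling and image-processing modules stay unimported until accessed.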
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_conditional_detr import *
from .feature_extraction_conditional_detr import *
from .image_processing_conditional_detr import *
from .modeling_conditional_detr import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
import colorsys
from rich.color import Color
from rich.console import Console
from rich.console import ConsoleOptions, RenderResult
from rich.measure import Measurement
from rich.segment import Segment
from rich.style import Style
from docarray.math.helper import minmax_normalize
from docarray.math.ndarray import to_numpy_array
class ColorBoxEmbedding:
def __init__(self, array):
self._array = minmax_normalize(array, (0, 5))
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
h = 0.75
for idx, y in enumerate(self._array):
l = 0.1 + ((y / 5) * 0.7)
r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0)
color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
yield Segment('▄', Style(color=color, bgcolor=color))
if idx != 0 and idx % options.max_width == 0:
yield Segment.line()
def __rich_measure__(
self, console: "Console", options: ConsoleOptions
) -> Measurement:
return Measurement(1, options.max_width)
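# Hedged usage sketch: the renderable above can be printed directly with rich; each
# embedding value (min-max normalized into [0, 5] in __init__) sets the lightness of
# one colored block. The array below is random, purely for illustration.
def _example_usage() -> None:
    import numpy as np
    from rich.console import Console

    Console().print(ColorBoxEmbedding(np.random.rand(64)))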
|
import colorsys
from rich.color import Color
from rich.console import Console
from rich.console import ConsoleOptions, RenderResult
from rich.measure import Measurement
from rich.segment import Segment
from rich.style import Style
from ...math.helper import minmax_normalize
from ...math.ndarray import to_numpy_array
class ColorBoxEmbedding:
def __init__(self, array):
self._array = minmax_normalize(array, (0, 5))
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
h = 0.75
for idx, y in enumerate(self._array):
l = 0.1 + ((y / 5) * 0.7)
r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0)
color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
yield Segment('▄', Style(color=color, bgcolor=color))
if idx != 0 and idx % options.max_width == 0:
yield Segment.line()
def __rich_measure__(
self, console: "Console", options: ConsoleOptions
) -> Measurement:
return Measurement(1, options.max_width)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--work-dir',
default='tools/batch_test',
help='the dir to save metric')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = osp.join(work_dir, fname)
checkpoint = model_info['checkpoint'].strip()
if not isinstance(model_info['eval'], list):
evals = [model_info['eval']]
else:
evals = model_info['eval']
eval = ' '.join(evals)
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint,
eval=eval)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
eval = model_test_dict['eval']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
command_info += f'--eval {eval} '
command_info += f'--cfg-option dist_params.port={port} '
command_info += ' &'
commands.append(command_info)
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save or run the '
         'script) with the argument "--out" or "--run"')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
work_dir = args.work_dir
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
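# Hedged sketch of the config file this script consumes (model names, checkpoint
# filenames and metrics are illustrative): every top-level entry is a dict, or a list
# of dicts, with `config`, `checkpoint` and `eval` keys.
#
#     atss = dict(
#         config='configs/atss/atss_r50_fpn_1x_coco.py',
#         checkpoint='atss_r50_fpn_1x_coco.pth',
#         eval='bbox')
#     mask_rcnn = dict(
#         config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
#         checkpoint='mask_rcnn_r50_fpn_1x_coco.pth',
#         eval=['bbox', 'segm'])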
|
import argparse
import os
import os.path as osp
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--work-dir',
default='tools/batch_test',
help='the dir to save metric')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = osp.join(work_dir, fname)
checkpoint = model_info['checkpoint'].strip()
if not isinstance(model_info['eval'], list):
evals = [model_info['eval']]
else:
evals = model_info['eval']
eval = ' '.join(evals)
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint,
eval=eval)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
eval = model_test_dict['eval']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
command_info += f'--eval {eval} '
command_info += f'--cfg-option dist_params.port={port} '
command_info += ' &'
commands.append(command_info)
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save or run the '
         'script) with the argument "--out" or "--run"')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
work_dir = args.work_dir
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
|
from langchain_core.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question.
Question: ${{Question with math problem.}}
```text
${{single line mathematical expression that solves the problem}}
```
...numexpr.evaluate(text)...
```output
${{Output of running the code}}
```
Answer: ${{Answer}}
Begin.
Question: What is 37593 * 67?
```text
37593 * 67
```
...numexpr.evaluate("37593 * 67")...
```output
2518731
```
Answer: 2518731
Question: 37593^(1/5)
```text
37593**(1/5)
```
...numexpr.evaluate("37593**(1/5)")...
```output
8.222831614237718
```
Answer: 8.222831614237718
Question: {question}
""" # noqa: E501
PROMPT = PromptTemplate(
input_variables=["question"],
template=_PROMPT_TEMPLATE,
)
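# Hedged usage sketch: the template takes a single "question" input variable; the chain
# that uses it is expected to emit a text block which is then evaluated with numexpr,
# as the few-shot examples above illustrate. The question below is illustrative.
def _example_usage() -> None:
    import numexpr

    rendered = PROMPT.format(question="What is 2 ** 10?")
    assert rendered.rstrip().endswith("Question: What is 2 ** 10?")
    assert float(numexpr.evaluate("2 ** 10")) == 1024.0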
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question.
Question: ${{Question with math problem.}}
```text
${{single line mathematical expression that solves the problem}}
```
...numexpr.evaluate(text)...
```output
${{Output of running the code}}
```
Answer: ${{Answer}}
Begin.
Question: What is 37593 * 67?
```text
37593 * 67
```
...numexpr.evaluate("37593 * 67")...
```output
2518731
```
Answer: 2518731
Question: 37593^(1/5)
```text
37593**(1/5)
```
...numexpr.evaluate("37593**(1/5)")...
```output
8.222831614237718
```
Answer: 8.222831614237718
Question: {question}
"""
PROMPT = PromptTemplate(
input_variables=["question"],
template=_PROMPT_TEMPLATE,
)
|