input | output
---|---
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
|
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
}
}
|
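The include constants above are plain dictionaries meant to be passed as the `include` argument of Prisma client queries. A minimal usage sketch, assuming a hypothetical `agentgraph` model accessor and `graph_id` (neither is confirmed by the snippet):

```python
# Sketch only: model/field names are assumptions based on the includes above.
from prisma import Prisma

async def get_graph(graph_id: str):
    db = Prisma()
    await db.connect()
    try:
        # Eagerly loads AgentNodes together with their Input/Output/AgentBlock.
        return await db.agentgraph.find_unique(
            where={"id": graph_id},
            include=AGENT_GRAPH_INCLUDE,
        )
    finally:
        await db.disconnect()
```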
import pytest
from llama_index.core.readers.base import BaseReader
from llama_index.readers.hive.base import InvalidSqlError, _validate_sql_query
from llama_index.readers.hive import HiveReader
def test_class():
assert issubclass(HiveReader, BaseReader)
def test_validation():
with pytest.raises(InvalidSqlError):
sql_query = _validate_sql_query(["SELECT * FROM users", "DROP TABLE users"])
with pytest.raises(InvalidSqlError):
sql_query = _validate_sql_query(
["SELECT * FROM users WHERE name = 'Bob' OR '1'='1'"]
)
with pytest.raises(InvalidSqlError):
sql_query = _validate_sql_query(
[
"""
CREATE TABLE IF NOT EXISTS users (
id INT,
name STRING,
email STRING
)
STORED AS TEXTFILE
"""
]
)
sql_query = _validate_sql_query(["SELECT * FROM users WHERE name = 'Bob'"])
assert sql_query is None
|
from llama_index.core.readers.base import BaseReader
from llama_index.readers.hive import HiveReader
def test_class():
names_of_base_classes = [b.__name__ for b in HiveReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
|
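The validator exercised by the first test file is not shown here. A plausible sketch, assuming `_validate_sql_query` accepts a list of statements, allows only plain `SELECT`s, rejects tautology-style injections, and returns `None` on success (all assumptions, matching only the behavior the tests assert):

```python
import re

class InvalidSqlError(Exception):
    """Raised when a Hive SQL query fails validation."""

def _validate_sql_query(queries: list[str]) -> None:
    # Sketch only: the real llama-index implementation may differ.
    for query in queries:
        statement = query.strip().rstrip(";")
        if not re.match(r"(?is)^select\b", statement):
            raise InvalidSqlError(f"Only SELECT statements are allowed: {query!r}")
        if re.search(r"(?i)\bor\s+'[^']*'\s*=\s*'[^']*'", statement):
            raise InvalidSqlError(f"Possible SQL injection: {query!r}")
```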
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import VQModel
from diffusers.utils.testing_utils import (
backend_manual_seed,
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = VQModel
main_input_name = "sample"
@property
def dummy_input(self, sizes=(32, 32)):
batch_size = 4
num_channels = 3
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [8, 16],
"norm_num_groups": 8,
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
@unittest.skip("Test not supported.")
def test_forward_signature(self):
pass
@unittest.skip("Test not supported.")
def test_training(self):
pass
def test_from_pretrained_hub(self):
model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
self.assertIsNotNone(model)
self.assertEqual(len(loading_info["missing_keys"]), 0)
model.to(torch_device)
image = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def test_output_pretrained(self):
model = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(torch_device).eval()
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
image = image.to(torch_device)
with torch.no_grad():
output = model(image).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
# fmt: on
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_loss_pretrained(self):
model = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(torch_device).eval()
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
image = image.to(torch_device)
with torch.no_grad():
output = model(image).commit_loss.cpu()
# fmt: off
expected_output = torch.tensor([0.1936])
# fmt: on
self.assertTrue(torch.allclose(output, expected_output, atol=1e-3))
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import VQModel
from diffusers.utils.testing_utils import (
backend_manual_seed,
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = VQModel
main_input_name = "sample"
@property
def dummy_input(self, sizes=(32, 32)):
batch_size = 4
num_channels = 3
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [8, 16],
"norm_num_groups": 8,
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_forward_signature(self):
pass
def test_training(self):
pass
def test_from_pretrained_hub(self):
model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
self.assertIsNotNone(model)
self.assertEqual(len(loading_info["missing_keys"]), 0)
model.to(torch_device)
image = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def test_output_pretrained(self):
model = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(torch_device).eval()
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
image = image.to(torch_device)
with torch.no_grad():
output = model(image).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
# fmt: on
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_loss_pretrained(self):
model = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(torch_device).eval()
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
image = image.to(torch_device)
with torch.no_grad():
output = model(image).commit_loss.cpu()
# fmt: off
expected_output = torch.tensor([0.1936])
# fmt: on
self.assertTrue(torch.allclose(output, expected_output, atol=1e-3))
|
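For reference, the tiny configuration used by `prepare_init_args_and_inputs_for_common` can be instantiated directly; a minimal sketch of a forward pass with the same shapes the tests use:

```python
import torch
from diffusers import VQModel

model = VQModel(
    block_out_channels=[8, 16],
    norm_num_groups=8,
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=3,
)
sample = torch.randn(4, 3, 32, 32)  # (batch, channels, height, width)
with torch.no_grad():
    reconstruction = model(sample).sample  # same shape as the input
```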
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(
            self.map_location + ':' + str(properties.get('gpu_id'))
            if torch.cuda.is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for image_index, image_result in enumerate(data):
output.append([])
if isinstance(image_result, tuple):
bbox_result, segm_result = image_result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = image_result, None
for class_index, class_result in enumerate(bbox_result):
class_name = self.model.CLASSES[class_index]
for bbox in class_result:
bbox_coords = bbox[:-1].tolist()
score = float(bbox[-1])
if score >= self.threshold:
output[image_index].append({
'class_name': class_name,
'bbox': bbox_coords,
'score': score
})
return output
|
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(
            self.map_location + ':' + str(properties.get('gpu_id'))
            if torch.cuda.is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for image_index, image_result in enumerate(data):
output.append([])
if isinstance(image_result, tuple):
bbox_result, segm_result = image_result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = image_result, None
for class_index, class_result in enumerate(bbox_result):
class_name = self.model.CLASSES[class_index]
for bbox in class_result:
bbox_coords = bbox[:-1].tolist()
score = float(bbox[-1])
if score >= self.threshold:
output[image_index].append({
class_name: bbox_coords,
'score': score
})
return output
|
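The two handler versions differ only in `postprocess`: the newer one emits explicit `class_name`/`bbox` keys, while the older one used the class name itself as the key. Illustrative payloads for a single detection (values made up):

```python
# Newer format (first version above)
new_entry = {"class_name": "person", "bbox": [12.0, 34.0, 56.0, 78.0], "score": 0.91}
# Older format (second version above)
old_entry = {"person": [12.0, 34.0, 56.0, 78.0], "score": 0.91}
```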
from typing import Any
from langchain_core.agents import AgentAction
from langchain_core.prompts.chat import ChatPromptTemplate
class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
"""Chat prompt template for the agent scratchpad."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
def _construct_agent_scratchpad(
self, intermediate_steps: list[tuple[AgentAction, str]]
) -> str:
if len(intermediate_steps) == 0:
return ""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
return (
f"This was your previous work "
f"(but I haven't seen any of it! I only see what "
f"you return as final answer):\n{thoughts}"
)
def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]:
intermediate_steps = kwargs.pop("intermediate_steps")
kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(
intermediate_steps
)
return kwargs
|
from typing import Any, Dict, List, Tuple
from langchain_core.agents import AgentAction
from langchain_core.prompts.chat import ChatPromptTemplate
class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
"""Chat prompt template for the agent scratchpad."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
def _construct_agent_scratchpad(
self, intermediate_steps: List[Tuple[AgentAction, str]]
) -> str:
if len(intermediate_steps) == 0:
return ""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
return (
f"This was your previous work "
f"(but I haven't seen any of it! I only see what "
f"you return as final answer):\n{thoughts}"
)
def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]:
intermediate_steps = kwargs.pop("intermediate_steps")
kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(
intermediate_steps
)
return kwargs
|
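A minimal sketch of how the scratchpad is assembled from intermediate steps (the tool name and observation are made up):

```python
from langchain_core.agents import AgentAction

template = AgentScratchPadChatPromptTemplate.from_messages(
    [("system", "{agent_scratchpad}")]
)
steps = [
    (AgentAction(tool="search", tool_input="weather", log="Thought: check the weather"),
     "It is sunny."),
]
print(template._construct_agent_scratchpad(steps))
# -> "This was your previous work ... Thought: check the weather
#     Observation: It is sunny.
#     Thought: "
```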
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This loss is an adaptation of MultipleNegativesRankingLoss. MultipleNegativesRankingLoss computes the following loss:
For a given anchor and a list of candidates, find the positive candidate.
In MultipleNegativesSymmetricRankingLoss, we add another loss term: Given the positive and a list of all anchors,
find the correct (matching) anchor.
For the example of question-answering: You have (question, answer)-pairs. MultipleNegativesRankingLoss just computes
the loss to find the answer for a given question. MultipleNegativesSymmetricRankingLoss additionally computes the
loss to find the question for a given answer.
        Note: If you pass triplets, the negative entry will be ignored. An anchor is just searched for the positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MultipleNegativesSymmetricRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self) -> Dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
"""
This loss is an adaptation of MultipleNegativesRankingLoss. MultipleNegativesRankingLoss computes the following loss:
For a given anchor and a list of candidates, find the positive candidate.
In MultipleNegativesSymmetricRankingLoss, we add another loss term: Given the positive and a list of all anchors,
find the correct (matching) anchor.
For the example of question-answering: You have (question, answer)-pairs. MultipleNegativesRankingLoss just computes
the loss to find the answer for a given question. MultipleNegativesSymmetricRankingLoss additionally computes the
loss to find the question for a given answer.
        Note: If you pass triplets, the negative entry will be ignored. An anchor is just searched for the positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MultipleNegativesSymmetricRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
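The symmetric term is just the same cross-entropy applied to the transposed anchor-positive score matrix, so each positive must also pick out its anchor. A toy sketch with raw tensors:

```python
import torch
from torch import nn

ce = nn.CrossEntropyLoss()
scores = torch.randn(4, 4) * 20.0  # anchor x positive similarities, scaled
labels = torch.arange(4)           # a[i] should match b[i] (the diagonal)
forward_loss = ce(scores, labels)    # find the positive for each anchor
backward_loss = ce(scores.T, labels) # find the anchor for each positive
loss = (forward_loss + backward_loss) / 2
```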
import functools
import pytest
from jina.helper import iscoroutinefunction
from jina.serve.executors import get_executor_taboo
from jina.serve.executors.decorators import dynamic_batching, requests
from jina.serve.helper import store_init_kwargs
def test_store_init_kwargs():
store_init_kwargs_decorator = functools.partial(
store_init_kwargs, taboo=get_executor_taboo()
)
class A:
@store_init_kwargs_decorator
def __init__(self, a, b, c, *args, **kwargs):
pass
@store_init_kwargs_decorator
def f(self, a, b, *args, **kwargs):
pass
instance = A('a', 'b', c=5, d='d')
assert instance._init_kwargs_dict
assert instance._init_kwargs_dict == {'a': 'a', 'b': 'b', 'c': 5}
with pytest.raises(TypeError):
instance.f('a', 'b', c='c')
def test_requests():
with pytest.raises(TypeError):
@requests
def fn(*args):
pass
@requests
def fn_2(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
def test_async_requests():
with pytest.raises(TypeError):
@requests
def fn(*args):
pass
@requests
def fn_2(*args, **kwargs):
pass
@requests
async def fn_3(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
assert not iscoroutinefunction(getattr(fn_2, 'fn'))
assert hasattr(fn_3, 'fn')
assert iscoroutinefunction(getattr(fn_3, 'fn'))
def test_dynamic_batching():
@dynamic_batching()
def fn_2(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
def test_async_dynamic_batching():
@dynamic_batching
def fn_2(*args, **kwargs):
pass
@dynamic_batching
async def fn_3(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
assert not iscoroutinefunction(getattr(fn_2, 'fn'))
assert hasattr(fn_3, 'fn')
assert iscoroutinefunction(getattr(fn_3, 'fn'))
|
import functools
import pytest
from jina.helper import iscoroutinefunction
from jina.serve.executors import get_executor_taboo
from jina.serve.executors.decorators import requests
from jina.serve.helper import store_init_kwargs
def test_store_init_kwargs():
store_init_kwargs_decorator = functools.partial(
store_init_kwargs, taboo=get_executor_taboo()
)
class A:
@store_init_kwargs_decorator
def __init__(self, a, b, c, *args, **kwargs):
pass
@store_init_kwargs_decorator
def f(self, a, b, *args, **kwargs):
pass
instance = A('a', 'b', c=5, d='d')
assert instance._init_kwargs_dict
assert instance._init_kwargs_dict == {'a': 'a', 'b': 'b', 'c': 5}
with pytest.raises(TypeError):
instance.f('a', 'b', c='c')
def test_requests():
with pytest.raises(TypeError):
@requests
def fn(*args):
pass
@requests
def fn_2(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
def test_async_requests():
with pytest.raises(TypeError):
@requests
def fn(*args):
pass
@requests
def fn_2(*args, **kwargs):
pass
@requests
async def fn_3(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
assert not iscoroutinefunction(getattr(fn_2, 'fn'))
assert hasattr(fn_3, 'fn')
assert iscoroutinefunction(getattr(fn_3, 'fn'))
|
class DataAdapter:
"""Base class for input data adapters.
The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields NumPy
arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_tf_dataset(self):
"""Get a `tf.data.Dataset` instance for the DataAdapter.
        Note that the dataset returned does not repeat across epochs, so the
        caller might need to create a new iterator for the same dataset at the
        beginning of each epoch. This behavior might change in the future.
Returns:
A `tf.data.Dataset`. Caller might use the dataset in different
context, e.g. iter(dataset) in eager to get the value directly, or
in graph mode, provide the iterator tensor to Keras model function.
"""
raise NotImplementedError
def get_jax_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields arrays that
that can be fed to JAX. NumPy arrays are preferred for performance.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_torch_dataloader(self):
"""Get a Torch `DataLoader` for the `DataAdapter`.
Returns:
A Torch `DataLoader`.
"""
raise NotImplementedError
@property
def num_batches(self):
"""Return the size (number of batches) for the dataset created.
        For certain types of data input, the number of batches is known, e.g.
        for NumPy data, the size is the same as (number_of_element /
        batch_size). For a dataset or Python generator, the size is unknown,
        since it may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is
unknown. The caller could use this to control the loop of training,
show progress bar, or handle unexpected StopIteration error.
"""
raise NotImplementedError
@property
def batch_size(self):
"""Return the batch size of the dataset created.
        For certain types of data input, the batch size is known, and even
        required, as with NumPy arrays. For a dataset, the batch size is
        unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
@property
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@property
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
def on_epoch_begin(self):
"""A hook called before each epoch."""
pass
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
|
class DataAdapter:
"""Base class for input data adapters.
    The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields NumPy
arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_tf_dataset(self):
"""Get a `tf.data.Dataset` instance for the DataAdapter.
        Note that the dataset returned does not repeat across epochs, so the
        caller might need to create a new iterator for the same dataset at the
        beginning of each epoch. This behavior might change in the future.
Returns:
A `tf.data.Dataset`. Caller might use the dataset in different
context, e.g. iter(dataset) in eager to get the value directly, or
in graph mode, provide the iterator tensor to Keras model function.
"""
raise NotImplementedError
def get_jax_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields arrays that
that can be fed to JAX. NumPy arrays are preferred for performance.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_torch_dataloader(self):
"""Get a Torch `DataLoader` for the `DataAdapter`.
Returns:
A Torch `DataLoader`.
"""
raise NotImplementedError
@property
def num_batches(self):
"""Return the size (number of batches) for the dataset created.
        For certain types of data input, the number of batches is known, e.g.
        for NumPy data, the size is the same as (number_of_element /
        batch_size). For a dataset or Python generator, the size is unknown,
        since it may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is
unknown. The caller could use this to control the loop of training,
show progress bar, or handle unexpected StopIteration error.
"""
raise NotImplementedError
@property
def batch_size(self):
"""Return the batch size of the dataset created.
        For certain types of data input, the batch size is known, and even
        required, as with NumPy arrays. For a dataset, the batch size is
        unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
@property
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@property
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
def on_epoch_begin(self):
"""A hook called before each epoch."""
pass
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
|
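As a concrete illustration of the interface, a hypothetical adapter over an in-memory list of NumPy batches might implement just the NumPy path (sketch only, not part of Keras):

```python
import numpy as np

class ListDataAdapter(DataAdapter):
    """Adapter over a pre-batched list of NumPy arrays."""

    def __init__(self, batches):
        self._batches = list(batches)

    def get_numpy_iterator(self):
        return iter(self._batches)

    @property
    def num_batches(self):
        return len(self._batches)

    @property
    def batch_size(self):
        return len(self._batches[0]) if self._batches else None

    @property
    def has_partial_batch(self):
        return False

    @property
    def partial_batch_size(self):
        return None
```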
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
depths=[2, 2, 18, 2],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
|
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py'
pretrained = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth' # noqa
model = dict(
backbone=dict(depths=[2, 2, 18, 2]),
init_cfg=dict(type='Pretrained', checkpoint=pretrained))
|
from pydantic import BaseModel
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.compass import CompassWebhookType
class Transcription(BaseModel):
text: str
speaker: str
end: float
start: float
duration: float
class TranscriptionDataModel(BaseModel):
date: str
transcription: str
transcriptions: list[Transcription]
class CompassAITriggerBlock(Block):
class Input(BlockSchema):
payload: TranscriptionDataModel = SchemaField(hidden=True)
class Output(BlockSchema):
transcription: str = SchemaField(
description="The contents of the compass transcription."
)
def __init__(self):
super().__init__(
id="9464a020-ed1d-49e1-990f-7f2ac924a2b7",
description="This block will output the contents of the compass transcription.",
categories={BlockCategory.HARDWARE},
input_schema=CompassAITriggerBlock.Input,
output_schema=CompassAITriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider=ProviderName.COMPASS,
webhook_type=CompassWebhookType.TRANSCRIPTION,
),
test_input=[
{"input": "Hello, World!"},
{"input": "Hello, World!", "data": "Existing Data"},
],
# test_output=[
# ("output", "Hello, World!"), # No data provided, so trigger is returned
# ("output", "Existing Data"), # Data is provided, so data is returned.
# ],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "transcription", input_data.payload.transcription
|
from pydantic import BaseModel
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.compass import CompassWebhookType
class Transcription(BaseModel):
text: str
speaker: str
end: float
start: float
duration: float
class TranscriptionDataModel(BaseModel):
date: str
transcription: str
transcriptions: list[Transcription]
class CompassAITriggerBlock(Block):
class Input(BlockSchema):
payload: TranscriptionDataModel = SchemaField(hidden=True)
class Output(BlockSchema):
transcription: str = SchemaField(
description="The contents of the compass transcription."
)
def __init__(self):
super().__init__(
id="9464a020-ed1d-49e1-990f-7f2ac924a2b7",
description="This block will output the contents of the compass transcription.",
categories={BlockCategory.HARDWARE},
input_schema=CompassAITriggerBlock.Input,
output_schema=CompassAITriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider=ProviderName.COMPASS,
webhook_type=CompassWebhookType.TRANSCRIPTION,
),
test_input=[
{"input": "Hello, World!"},
{"input": "Hello, World!", "data": "Existing Data"},
],
# test_output=[
# ("output", "Hello, World!"), # No data provided, so trigger is returned
# ("output", "Existing Data"), # Data is provided, so data is returned.
# ],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "transcription", input_data.payload.transcription
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
|
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.audio_url import AudioUrl
from docarray.typing.url.image_url import ImageUrl
from docarray.typing.url.text_url import TextUrl
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
__all__ = ['ImageUrl', 'AudioUrl', 'AnyUrl', 'TextUrl', 'Mesh3DUrl', 'PointCloud3DUrl']
|
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.image_url import ImageUrl
from docarray.typing.url.text_url import TextUrl
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
__all__ = ['ImageUrl', 'AnyUrl', 'TextUrl', 'Mesh3DUrl', 'PointCloud3DUrl']
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
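The only functional change between the two scripts is the encoding entry point: the first uses the asymmetric `encode_document`/`encode_query` helpers, while the second uses the single `encode` method for both sides. Side by side:

```python
# Asymmetric entry points (first script)
corpus_embeddings = sparse_model.encode_document(corpus, convert_to_sparse_tensor=True)
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)

# Single symmetric entry point (second script)
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True)
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
```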
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmengine import Config
from mmengine.utils import digit_version
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# check whether it is SSD
if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not an SSD model.')
def convert(in_file, out_file):
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
parse_config('#' + meta_info['config'])
for key, value in in_state_dict.items():
if 'extra' in key:
layer_idx = int(key.split('.')[2])
new_key = 'neck.extra_layers.{}.{}.conv.'.format(
layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
elif 'l2_norm' in key:
new_key = 'neck.l2_norm.weight'
elif 'bbox_head' in key:
new_key = key[:21] + '.0' + key[21:]
else:
new_key = key
out_state_dict[new_key] = value
checkpoint['state_dict'] = out_state_dict
if digit_version(torch.__version__) >= digit_version('1.6'):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade SSD version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmengine import Config
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# check whether it is SSD
if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not an SSD model.')
def convert(in_file, out_file):
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
parse_config('#' + meta_info['config'])
for key, value in in_state_dict.items():
if 'extra' in key:
layer_idx = int(key.split('.')[2])
new_key = 'neck.extra_layers.{}.{}.conv.'.format(
layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
elif 'l2_norm' in key:
new_key = 'neck.l2_norm.weight'
elif 'bbox_head' in key:
new_key = key[:21] + '.0' + key[21:]
else:
new_key = key
out_state_dict[new_key] = value
checkpoint['state_dict'] = out_state_dict
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade SSD version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
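The two scripts differ in the version check: the first uses `digit_version` because lexicographic string comparison of version numbers breaks once a component reaches two digits. A quick illustration:

```python
from mmengine.utils import digit_version

print('1.10.0' >= '1.6')                                # False: '1' < '6' as characters
print(digit_version('1.10.0') >= digit_version('1.6'))  # True: compared numerically
```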
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmdet.registry import HOOKS
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
        num_last_epochs (int): The number of epochs at the end of training
            during which the data augmentation is turned off and the L1 loss
            is switched on. Defaults to 15.
        skip_type_keys (Sequence[str], optional): Sequence of pipeline type
            names to skip. Defaults to ('Mosaic', 'RandomAffine', 'MixUp').
"""
def __init__(
self,
num_last_epochs: int = 15,
skip_type_keys: Sequence[str] = ('Mosaic', 'RandomAffine', 'MixUp')
) -> None:
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
self._restart_dataloader = False
self._has_switched = False
def before_train_epoch(self, runner) -> None:
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.train_dataloader
model = runner.model
# TODO: refactor after mmengine using model wrapper
if is_model_wrapper(model):
model = model.module
epoch_to_be_switched = ((epoch + 1) >=
runner.max_epochs - self.num_last_epochs)
if epoch_to_be_switched and not self._has_switched:
runner.logger.info('No mosaic and mixup aug now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
runner.logger.info('Add additional L1 loss now!')
if hasattr(model, 'detector'):
model.detector.bbox_head.use_l1 = True
else:
model.bbox_head.use_l1 = True
self._has_switched = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmdet.registry import HOOKS
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
        num_last_epochs (int): The number of epochs at the end of training
            during which the data augmentation is turned off and the L1 loss
            is switched on. Defaults to 15.
        skip_type_keys (Sequence[str], optional): Sequence of pipeline type
            names to skip. Defaults to ('Mosaic', 'RandomAffine', 'MixUp').
"""
def __init__(
self,
num_last_epochs: int = 15,
skip_type_keys: Sequence[str] = ('Mosaic', 'RandomAffine', 'MixUp')
) -> None:
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
self._restart_dataloader = False
def before_train_epoch(self, runner) -> None:
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.train_dataloader
model = runner.model
# TODO: refactor after mmengine using model wrapper
if is_model_wrapper(model):
model = model.module
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
runner.logger.info('No mosaic and mixup aug now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
runner.logger.info('Add additional L1 loss now!')
model.bbox_head.use_l1 = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
|
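The first hook version replaces the exact-epoch check with `>=` plus a `_has_switched` guard; the difference matters when training resumes past the switch point. A toy illustration:

```python
max_epochs, num_last_epochs = 300, 15
switch_point = max_epochs - num_last_epochs  # 285
epoch = 289                                  # e.g. training resumed here
print((epoch + 1) == switch_point)  # False: the old check never fires again
print((epoch + 1) >= switch_point)  # True: the new check still switches (once,
                                    # thanks to the _has_switched flag)
```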
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
roi_head=dict(
bbox_head=dict(
loss_bbox=dict(
_delete_=True,
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=1.0,
loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
sampler=dict(
_delete_=True,
type='CombinedSampler',
num=512,
pos_fraction=0.25,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)))))
# MMEngine support the following two ways, users can choose
# according to convenience
# _base_.train_dataloader.dataset.proposal_file = 'libra_proposals/rpn_r50_fpn_1x_train2017.pkl' # noqa
train_dataloader = dict(
dataset=dict(proposal_file='libra_proposals/rpn_r50_fpn_1x_train2017.pkl'))
# _base_.val_dataloader.dataset.proposal_file = 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl' # noqa
# test_dataloader = _base_.val_dataloader
val_dataloader = dict(
dataset=dict(proposal_file='libra_proposals/rpn_r50_fpn_1x_val2017.pkl'))
test_dataloader = val_dataloader
|
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
roi_head=dict(
bbox_head=dict(
loss_bbox=dict(
_delete_=True,
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=1.0,
loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
sampler=dict(
_delete_=True,
type='CombinedSampler',
num=512,
pos_fraction=0.25,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)))))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
data = dict(
train=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_train2017.pkl'),
val=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'),
test=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'))
|
import functools
import pytest
from jina.helper import iscoroutinefunction
from jina.serve.executors import get_executor_taboo
from jina.serve.executors.decorators import requests
from jina.serve.helper import store_init_kwargs
def test_store_init_kwargs():
store_init_kwargs_decorator = functools.partial(
store_init_kwargs, taboo=get_executor_taboo()
)
class A:
@store_init_kwargs_decorator
def __init__(self, a, b, c, *args, **kwargs):
pass
@store_init_kwargs_decorator
def f(self, a, b, *args, **kwargs):
pass
instance = A('a', 'b', c=5, d='d')
assert instance._init_kwargs_dict
assert instance._init_kwargs_dict == {'a': 'a', 'b': 'b', 'c': 5}
with pytest.raises(TypeError):
instance.f('a', 'b', c='c')
def test_requests():
with pytest.raises(TypeError):
@requests
def fn(*args):
pass
@requests
def fn_2(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
def test_async_requests():
with pytest.raises(TypeError):
@requests
def fn(*args):
pass
@requests
def fn_2(*args, **kwargs):
pass
@requests
async def fn_3(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
assert not iscoroutinefunction(getattr(fn_2, 'fn'))
assert hasattr(fn_3, 'fn')
assert iscoroutinefunction(getattr(fn_3, 'fn'))
|
import functools
import pytest
from jina.helper import iscoroutinefunction
from jina.serve.executors import get_default_metas, get_executor_taboo
from jina.serve.executors.decorators import requests
from jina.serve.helper import store_init_kwargs, wrap_func
def test_store_init_kwargs():
store_init_kwargs_decorator = functools.partial(
store_init_kwargs, taboo=get_executor_taboo()
)
class A:
@store_init_kwargs_decorator
def __init__(self, a, b, c, *args, **kwargs):
pass
@store_init_kwargs_decorator
def f(self, a, b, *args, **kwargs):
pass
instance = A('a', 'b', c=5, d='d')
assert instance._init_kwargs_dict
assert instance._init_kwargs_dict == {'a': 'a', 'b': 'b', 'c': 5}
with pytest.raises(TypeError):
instance.f('a', 'b', c='c')
def test_requests():
with pytest.raises(TypeError):
@requests
def fn(*args):
pass
@requests
def fn_2(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
def test_async_requests():
with pytest.raises(TypeError):
@requests
def fn(*args):
pass
@requests
def fn_2(*args, **kwargs):
pass
@requests
async def fn_3(*args, **kwargs):
pass
assert hasattr(fn_2, 'fn')
assert not iscoroutinefunction(getattr(fn_2, 'fn'))
assert hasattr(fn_3, 'fn')
assert iscoroutinefunction(getattr(fn_3, 'fn'))
|
"""Init file."""
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llama_pack.download import download_llama_pack
__all__ = [
"BaseLlamaPack",
"download_llama_pack",
]
|
"""Init file."""
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llama_pack.download import download_llama_pack
__all__ = [
"BaseLlamaPack",
"download_llama_pack",
]
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmengine.logging import HistoryBuffer
array_method = [np.array, lambda x: x]
try:
import torch
except ImportError:
pass
else:
array_method.append(torch.tensor)
class TestLoggerBuffer:
def test_init(self):
log_buffer = HistoryBuffer()
assert log_buffer.max_length == 1000000
log_history, counts = log_buffer.data
assert len(log_history) == 0
assert len(counts) == 0
# test the length of array exceed `max_length`
logs = np.random.randint(1, 10, log_buffer.max_length + 1)
counts = np.random.randint(1, 10, log_buffer.max_length + 1)
log_buffer = HistoryBuffer(logs, counts)
log_history, count_history = log_buffer.data
assert len(log_history) == log_buffer.max_length
assert len(count_history) == log_buffer.max_length
assert logs[1] == log_history[0]
assert counts[1] == count_history[0]
# The different lengths of `log_history` and `count_history` will
# raise error
with pytest.raises(AssertionError):
HistoryBuffer([1, 2], [1])
@pytest.mark.parametrize('array_method', array_method)
def test_update(self, array_method):
# test `update` method
log_buffer = HistoryBuffer()
log_history = array_method([1, 2, 3, 4, 5])
count_history = array_method([5, 5, 5, 5, 5])
for i in range(len(log_history)):
log_buffer.update(float(log_history[i]), float(count_history[i]))
recorded_history, recorded_count = log_buffer.data
for a, b in zip(log_history, recorded_history):
assert float(a) == float(b)
for a, b in zip(count_history, recorded_count):
assert float(a) == float(b)
# test the length of `array` exceed `max_length`
max_array = array_method([[-1] + [1] * (log_buffer.max_length - 1)])
max_count = array_method([[-1] + [1] * (log_buffer.max_length - 1)])
log_buffer = HistoryBuffer(max_array, max_count)
log_buffer.update(1)
log_history, count_history = log_buffer.data
assert log_history[0] == 1
assert count_history[0] == 1
assert len(log_history) == log_buffer.max_length
assert len(count_history) == log_buffer.max_length
# Update an iterable object will raise a type error, `log_val` and
# `count` should be single value
with pytest.raises(TypeError):
log_buffer.update(array_method([1, 2]))
@pytest.mark.parametrize('statistics_method, log_buffer_type',
[(np.min, 'min'), (np.max, 'max')])
def test_max_min(self, statistics_method, log_buffer_type):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert statistics_method(log_history[-10:]) == \
getattr(log_buffer, log_buffer_type)(10)
assert statistics_method(log_history) == \
getattr(log_buffer, log_buffer_type)()
def test_mean(self):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert np.sum(log_history[-10:]) / \
np.sum(count_history[-10:]) == \
log_buffer.mean(10)
assert np.sum(log_history) / \
np.sum(count_history) == \
log_buffer.mean()
def test_current(self):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert log_history[-1] == log_buffer.current()
# test get empty array
log_buffer = HistoryBuffer()
with pytest.raises(ValueError):
log_buffer.current()
def test_statistics(self):
log_history = np.array([1, 2, 3, 4, 5])
count_history = np.array([1, 1, 1, 1, 1])
log_buffer = HistoryBuffer(log_history, count_history)
assert log_buffer.statistics('mean') == 3
assert log_buffer.statistics('min') == 1
assert log_buffer.statistics('max') == 5
assert log_buffer.statistics('current') == 5
# Access unknown method will raise an error.
with pytest.raises(KeyError):
log_buffer.statistics('unknown')
def test_register_statistics(self):
@HistoryBuffer.register_statistics
def custom_statistics(self):
return -1
log_buffer = HistoryBuffer()
assert log_buffer.statistics('custom_statistics') == -1
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine import HistoryBuffer
class TestLoggerBuffer:
def test_init(self):
log_buffer = HistoryBuffer()
assert log_buffer.max_length == 1000000
log_history, counts = log_buffer.data
assert len(log_history) == 0
assert len(counts) == 0
# test the length of array exceed `max_length`
logs = np.random.randint(1, 10, log_buffer.max_length + 1)
counts = np.random.randint(1, 10, log_buffer.max_length + 1)
log_buffer = HistoryBuffer(logs, counts)
log_history, count_history = log_buffer.data
assert len(log_history) == log_buffer.max_length
assert len(count_history) == log_buffer.max_length
assert logs[1] == log_history[0]
assert counts[1] == count_history[0]
# The different lengths of `log_history` and `count_history` will
# raise error
with pytest.raises(AssertionError):
HistoryBuffer([1, 2], [1])
@pytest.mark.parametrize('array_method',
[torch.tensor, np.array, lambda x: x])
def test_update(self, array_method):
# test `update` method
log_buffer = HistoryBuffer()
log_history = array_method([1, 2, 3, 4, 5])
count_history = array_method([5, 5, 5, 5, 5])
for i in range(len(log_history)):
log_buffer.update(float(log_history[i]), float(count_history[i]))
recorded_history, recorded_count = log_buffer.data
for a, b in zip(log_history, recorded_history):
assert float(a) == float(b)
for a, b in zip(count_history, recorded_count):
assert float(a) == float(b)
# test the length of `array` exceed `max_length`
max_array = array_method([[-1] + [1] * (log_buffer.max_length - 1)])
max_count = array_method([[-1] + [1] * (log_buffer.max_length - 1)])
log_buffer = HistoryBuffer(max_array, max_count)
log_buffer.update(1)
log_history, count_history = log_buffer.data
assert log_history[0] == 1
assert count_history[0] == 1
assert len(log_history) == log_buffer.max_length
assert len(count_history) == log_buffer.max_length
# Update an iterable object will raise a type error, `log_val` and
# `count` should be single value
with pytest.raises(TypeError):
log_buffer.update(array_method([1, 2]))
@pytest.mark.parametrize('statistics_method, log_buffer_type',
[(np.min, 'min'), (np.max, 'max')])
def test_max_min(self, statistics_method, log_buffer_type):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert statistics_method(log_history[-10:]) == \
getattr(log_buffer, log_buffer_type)(10)
assert statistics_method(log_history) == \
getattr(log_buffer, log_buffer_type)()
def test_mean(self):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert np.sum(log_history[-10:]) / \
np.sum(count_history[-10:]) == \
log_buffer.mean(10)
assert np.sum(log_history) / \
np.sum(count_history) == \
log_buffer.mean()
def test_current(self):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert log_history[-1] == log_buffer.current()
# test get empty array
log_buffer = HistoryBuffer()
with pytest.raises(ValueError):
log_buffer.current()
def test_statistics(self):
log_history = np.array([1, 2, 3, 4, 5])
count_history = np.array([1, 1, 1, 1, 1])
log_buffer = HistoryBuffer(log_history, count_history)
assert log_buffer.statistics('mean') == 3
assert log_buffer.statistics('min') == 1
assert log_buffer.statistics('max') == 5
assert log_buffer.statistics('current') == 5
# Access unknown method will raise an error.
with pytest.raises(KeyError):
log_buffer.statistics('unknown')
def test_register_statistics(self):
@HistoryBuffer.register_statistics
def custom_statistics(self):
return -1
log_buffer = HistoryBuffer()
assert log_buffer.statistics('custom_statistics') == -1
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import (LearnedPositionalEncoding,
SinePositionalEncoding)
def test_sine_positional_encoding(num_feats=16, batch_size=2):
# test invalid type of scale
with pytest.raises(AssertionError):
module = SinePositionalEncoding(
num_feats, scale=(3., ), normalize=True)
module = SinePositionalEncoding(num_feats)
h, w = 10, 6
mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)
assert not module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
# set normalize
module = SinePositionalEncoding(num_feats, normalize=True)
assert module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
def test_learned_positional_encoding(num_feats=16,
row_num_embed=10,
col_num_embed=10,
batch_size=2):
module = LearnedPositionalEncoding(num_feats, row_num_embed, col_num_embed)
assert module.row_embed.weight.shape == (row_num_embed, num_feats)
assert module.col_embed.weight.shape == (col_num_embed, num_feats)
h, w = 10, 6
mask = torch.rand(batch_size, h, w) > 0.5
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
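# Added note (not in the original test): the expected channel count is
# num_feats * 2 because DETR-style positional encodings concatenate a
# num_feats-dim embedding for the row (y) axis with a num_feats-dim
# embedding for the column (x) axis at every spatial location.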
|
import pytest
import torch
from mmdet.models.utils import (LearnedPositionalEncoding,
SinePositionalEncoding)
def test_sine_positional_encoding(num_feats=16, batch_size=2):
# test invalid type of scale
with pytest.raises(AssertionError):
module = SinePositionalEncoding(
num_feats, scale=(3., ), normalize=True)
module = SinePositionalEncoding(num_feats)
h, w = 10, 6
mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)
assert not module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
# set normalize
module = SinePositionalEncoding(num_feats, normalize=True)
assert module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
def test_learned_positional_encoding(num_feats=16,
row_num_embed=10,
col_num_embed=10,
batch_size=2):
module = LearnedPositionalEncoding(num_feats, row_num_embed, col_num_embed)
assert module.row_embed.weight.shape == (row_num_embed, num_feats)
assert module.col_embed.weight.shape == (col_num_embed, num_feats)
h, w = 10, 6
mask = torch.rand(batch_size, h, w) > 0.5
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous score 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as the base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
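# Hedged usage sketch (not part of the original script): the trained
# CrossEncoder can also score new sentence pairs directly. The example
# sentences below are made up for illustration.
example_scores = model.predict(
    [["A man is eating food.", "A man is eating a meal."]]
)
print(example_scores)  # one similarity score in [0, 1] per input pair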
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous score 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as the base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
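# Note (added): `version_line` is expected to look like
# "__version__ = '1.2.3'" (version number hypothetical), so exec() defines
# __version__ in the local namespace without importing the package itself.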
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.0',
'strawberry-graphql',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
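# Note (added): each key in `extras_require` becomes a pip extra, e.g.
#   pip install "docarray[common]"
#   pip install "docarray[full]"
#   pip install "docarray[qdrant]"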
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.6.0',
'strawberry-graphql',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.6.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from typing import Any, Collection, List, Optional, Tuple, Union
from llama_index.core.tools.types import AsyncBaseTool
from pydantic import BaseModel
class LLMCompilerParseResult(BaseModel):
"""LLMCompiler parser result."""
thought: str
idx: int
tool_name: str
args: str
class JoinerOutput(BaseModel):
"""Joiner output."""
thought: str
answer: str
is_replan: bool = False
def _default_stringify_rule_for_arguments(args: Union[List, Tuple]) -> str:
if len(args) == 1:
return str(args[0])
else:
return str(tuple(args))
class LLMCompilerTask(BaseModel):
"""
LLM Compiler Task.
Object taken from
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/task_fetching_unit.py.
"""
idx: int
name: str
# tool: Callable
tool: AsyncBaseTool
args: Union[List, Tuple]
dependencies: Collection[int]
# TODO: look into this
# stringify_rule: Optional[Callable] = None
thought: Optional[str] = None
observation: Optional[str] = None
is_join: bool = False
class Config:
arbitrary_types_allowed = True
async def __call__(self) -> Any:
return await self.tool.acall(*self.args)
def get_thought_action_observation(
self,
include_action: bool = True,
include_thought: bool = True,
include_action_idx: bool = False,
) -> str:
thought_action_observation = ""
if self.thought and include_thought:
thought_action_observation = f"Thought: {self.thought}\n"
if include_action:
idx = f"{self.idx}. " if include_action_idx else ""
# if self.stringify_rule:
# # If the user has specified a custom stringify rule for the
# # function argument, use it
# thought_action_observation += f"{idx}{self.stringify_rule(self.args)}\n"
# else:
# Otherwise, we have a default stringify rule
thought_action_observation += (
f"{idx}{self.name}{_default_stringify_rule_for_arguments(self.args)}\n"
)
if self.observation is not None:
thought_action_observation += f"Observation: {self.observation}\n"
return thought_action_observation
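# Hedged demo (added, not in the original module) of the default stringify
# rule used above: a single argument is rendered bare, multiple arguments
# as a tuple.
if __name__ == "__main__":
    assert _default_stringify_rule_for_arguments(["query"]) == "query"
    assert _default_stringify_rule_for_arguments(["a", 2]) == "('a', 2)"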
|
from typing import Any, Collection, List, Optional, Tuple, Union
from llama_index.core.tools.types import AsyncBaseTool
from pydantic import BaseModel
class LLMCompilerParseResult(BaseModel):
"""LLMCompiler parser result."""
thought: str
idx: int
tool_name: str
args: str
class JoinerOutput(BaseModel):
"""Joiner output."""
thought: str
answer: str
is_replan: bool = False
def _default_stringify_rule_for_arguments(args: Union[List, Tuple]) -> str:
if len(args) == 1:
return str(args[0])
else:
return str(tuple(args))
class LLMCompilerTask(BaseModel):
"""
LLM Compiler Task.
Object taken from
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/task_fetching_unit.py.
"""
idx: int
name: str
# tool: Callable
tool: AsyncBaseTool
args: Union[List, Tuple]
dependencies: Collection[int]
# TODO: look into this
# stringify_rule: Optional[Callable] = None
thought: Optional[str] = None
observation: Optional[str] = None
is_join: bool = False
class Config:
arbitrary_types_allowed = True
async def __call__(self) -> Any:
return await self.tool.acall(*self.args)
def get_thought_action_observation(
self,
include_action: bool = True,
include_thought: bool = True,
include_action_idx: bool = False,
) -> str:
thought_action_observation = ""
if self.thought and include_thought:
thought_action_observation = f"Thought: {self.thought}\n"
if include_action:
idx = f"{self.idx}. " if include_action_idx else ""
# if self.stringify_rule:
# # If the user has specified a custom stringify rule for the
# # function argument, use it
# thought_action_observation += f"{idx}{self.stringify_rule(self.args)}\n"
# else:
# Otherwise, we have a default stringify rule
thought_action_observation += (
f"{idx}{self.name}"
f"{_default_stringify_rule_for_arguments(self.args)}\n"
)
if self.observation is not None:
thought_action_observation += f"Observation: {self.observation}\n"
return thought_action_observation
|
from llama_index.core.base.llms.types import (
LLMMetadata,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.llms.openai_like import OpenAILike
class LlamaAPI(OpenAILike):
"""LlamaAPI LLM.
Examples:
`pip install llama-index-llms-llama-api`
```python
from llama_index.llms.llama_api import LlamaAPI
# Obtain an API key from https://www.llama-api.com/
api_key = "your-api-key"
llm = LlamaAPI(model="llama3.1-8b", context_window=128000, is_function_calling_model=True, api_key=api_key)
# Call the complete method with a prompt
resp = llm.complete("Paul Graham is ")
print(resp)
```
"""
model: str = Field(
default="llama3.1-8b",
description=LLMMetadata.model_fields["model_name"].description,
)
api_base: str = Field(
default="https://api.llmapi.com/",
description="The base URL for OpenAI API.",
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
is_function_calling_model: bool = Field(
default=False,
description=LLMMetadata.model_fields["is_function_calling_model"].description,
)
@classmethod
def class_name(cls) -> str:
return "llama_api_llm"
|
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS
from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.base.llms.generic_utils import chat_to_completion_decorator
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.openai.utils import (
from_openai_message_dict,
to_openai_message_dicts,
)
from llamaapi import LlamaAPI as Client
class LlamaAPI(CustomLLM):
"""LlamaAPI LLM.
Examples:
`pip install llama-index-llms-llama-api`
```python
from llama_index.llms.llama_api import LlamaAPI
# Obtain an API key from https://www.llama-api.com/
api_key = "your-api-key"
llm = LlamaAPI(api_key=api_key)
# Call the complete method with a prompt
resp = llm.complete("Paul Graham is ")
print(resp)
```
"""
model: str = Field(description="The llama-api model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
max_tokens: int = Field(description="The maximum number of tokens to generate.")
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the llama-api API."
)
_client: Any = PrivateAttr()
def __init__(
self,
model: str = "llama-13b-chat",
temperature: float = 0.1,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs or {},
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self._client = Client(api_key)
@classmethod
def class_name(cls) -> str:
return "llama_api_llm"
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
"max_length": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=4096,
num_output=DEFAULT_NUM_OUTPUTS,
is_chat_model=True,
is_function_calling_model=True,
model_name="llama-api",
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
message_dicts = to_openai_message_dicts(messages)
json_dict = {
"messages": message_dicts,
**self._model_kwargs,
**kwargs,
}
response = self._client.run(json_dict).json()
message_dict = response["choices"][0]["message"]
message = from_openai_message_dict(message_dict)
return ChatResponse(message=message, raw=response)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError("stream_complete is not supported for LlamaAPI")
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise NotImplementedError("stream_chat is not supported for LlamaAPI")
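# Note (added): `chat_to_completion_decorator` lets complete() reuse chat()
# by wrapping the prompt in a single user ChatMessage and mapping the
# resulting ChatResponse back to a CompletionResponse.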
|
from __future__ import annotations
from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseGISTEmbedLoss(GISTEmbedLoss):
    def __init__(
        self,
        model: SparseEncoder,
        guide: SparseEncoder,
        temperature: float = 0.01,
        margin_strategy: str = "absolute",
        margin: float = 0.0,
    ) -> None:
"""
This loss is used to train a SparseEncoder model using the GISTEmbed algorithm.
It takes a model and a guide model as input, and uses the guide model to guide the
in-batch negative sample selection. The cosine similarity is used to compute the loss
and the temperature parameter is used to scale the cosine similarities.
You can apply different false-negative filtering strategies to discard hard negatives that are too similar to
the positive. Two strategies are supported:
- "absolute": Discards negatives whose similarity score is greater than or equal to ``positive_score - margin``.
- "relative": Discards negatives whose similarity score is greater than or equal to ``positive_score * (1 - margin)``.
Args:
model: SparseEncoder model based on a `transformers` model.
guide: SparseEncoder model to guide the in-batch negative sample selection.
temperature: Temperature parameter to scale the cosine similarities.
margin_strategy: Strategy used for false negative filtering. One of {"absolute", "relative"}.
margin: The margin value for filtering negatives. Defaults to 0.0, together with the "absolute" strategy,
this only removes negatives that are more similar to the query than the positive is to the query.
References:
- For further details, see: https://arxiv.org/abs/2402.16829
Requirements:
1. (anchor, positive, negative) triplets
2. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseMultipleNegativesRankingLoss` is similar to this loss, but it does not use
a guide model to guide the in-batch negative sample selection. `SparseGISTEmbedLoss` yields
a stronger training signal at the cost of some training overhead.
Example:
::
# TODO: Add example but need to be sure it works first
"""
        super().__init__(
            model,
            guide=guide,
            temperature=temperature,
            margin_strategy=margin_strategy,
            margin=margin,
        )
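# Hedged numeric illustration (added, not from the original source): with
# positive_score = 0.8 and margin = 0.1, the "absolute" strategy discards
# in-batch negatives scoring >= 0.8 - 0.1 = 0.7, while the "relative"
# strategy discards negatives scoring >= 0.8 * (1 - 0.1) = 0.72.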
|
from __future__ import annotations
from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseGISTEmbedLoss(GISTEmbedLoss):
def __init__(
self,
model: SparseEncoder,
guide: SparseEncoder,
temperature: float = 0.01,
) -> None:
        super().__init__(model, guide=guide, temperature=temperature)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import TextUrl
from tests import TOYDATA_DIR
REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TEXT_FILES = [
str(TOYDATA_DIR / 'penal_colony.txt'),
str(TOYDATA_DIR / 'test.md'),
str(TOYDATA_DIR / 'test.html'),
str(TOYDATA_DIR / 'test.css'),
str(TOYDATA_DIR / 'test.csv'),
str(TOYDATA_DIR / 'test.log'),
]
LOCAL_TEXT_FILES_AND_BEGINNING = [
(str(TOYDATA_DIR / 'penal_colony.txt'), '“It’s a peculiar apparatus,”'),
(str(TOYDATA_DIR / 'test.md'), "# Hello"),
(str(TOYDATA_DIR / 'test.html'), "<html>"),
(str(TOYDATA_DIR / 'test.css'), 'body {'),
(str(TOYDATA_DIR / 'test.csv'), "John,Doe"),
(str(TOYDATA_DIR / 'test.log'), "2022-11-25 12:34:56 INFO: Program started"),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TEXT_FILE, '<!DOCTYPE html>'), *LOCAL_TEXT_FILES_AND_BEGINNING],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_proto_text_url(url):
uri = parse_obj_as(TextUrl, url)
proto = uri._to_node_protobuf()
assert 'text_url' in str(proto)
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
@pytest.mark.internet
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.mp3',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='TextUrl'):
parse_obj_as(TextUrl, path_to_file)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TextUrl
from tests import TOYDATA_DIR
REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TEXT_FILES = [
str(TOYDATA_DIR / 'penal_colony.txt'),
str(TOYDATA_DIR / 'test.md'),
str(TOYDATA_DIR / 'test.html'),
str(TOYDATA_DIR / 'test.css'),
str(TOYDATA_DIR / 'test.csv'),
str(TOYDATA_DIR / 'test.log'),
]
LOCAL_TEXT_FILES_AND_BEGINNING = [
(str(TOYDATA_DIR / 'penal_colony.txt'), '“It’s a peculiar apparatus,”'),
(str(TOYDATA_DIR / 'test.md'), "# Hello"),
(str(TOYDATA_DIR / 'test.html'), "<html>"),
(str(TOYDATA_DIR / 'test.css'), 'body {'),
(str(TOYDATA_DIR / 'test.csv'), "John,Doe"),
(str(TOYDATA_DIR / 'test.log'), "2022-11-25 12:34:56 INFO: Program started"),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TEXT_FILE, '<!DOCTYPE html>'), *LOCAL_TEXT_FILES_AND_BEGINNING],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_proto_text_url(url):
uri = parse_obj_as(TextUrl, url)
proto = uri._to_node_protobuf()
assert 'text_url' in str(proto)
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
@pytest.mark.internet
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.mp3',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='TextUrl'):
parse_obj_as(TextUrl, path_to_file)
|
import os
import pytest
import yaml
from jina import Gateway
from jina.jaml import JAML
from jina.serve.executors import BaseExecutor
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def shutdown(self):
pass
def test_cls_from_tag():
assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('BaseGateway') == Gateway
assert JAML.cls_from_tag('Nonexisting') is None
def test_base_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
from jina.serve.runtimes.gateway import BaseGateway
g = BaseGateway.load_config('BaseGateway', runtime_args={'port': [12345]})
g.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'BaseGateway'
assert (
type(Gateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== Gateway
)
def test_custom_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
e = Gateway.load_config('MyDummyGateway', runtime_args={'port': [12345]})
e.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'MyDummyGateway'
assert (
type(Gateway.load_config(gateway_path, runtime_args={'port': [12345]}))
== MyDummyGateway
)
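# Hedged illustration (added): the saved gateway.yml is expected to look
# roughly like
#   jtype: MyDummyGateway
#   with: {}
# where `jtype` is the tag that load_config resolves back to a class via
# JAML.cls_from_tag.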
|
import os
import yaml
from jina import Gateway
from jina.jaml import JAML
from jina.serve.executors import BaseExecutor
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def shutdown(self):
pass
def test_cls_from_tag():
assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('BaseGateway') == Gateway
assert JAML.cls_from_tag('Nonexisting') is None
def test_base_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
g = Gateway()
g.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'BaseGateway'
assert type(Gateway.load_config(gateway_path)) == Gateway
def test_custom_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
e = MyDummyGateway()
e.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'MyDummyGateway'
assert type(Gateway.load_config(gateway_path)) == MyDummyGateway
|
from groq._utils._utils import quote
from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
query: str = SchemaField(description="The search query to search the web for")
class Output(BlockSchema):
results: str = SchemaField(
description="The search results including content from top 5 URLs"
)
error: str = SchemaField(description="Error message if the search fails")
def __init__(self):
super().__init__(
id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
output_schema=SearchTheWebBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"query": "Artificial Intelligence",
},
test_credentials=TEST_CREDENTIALS,
test_output=("results", "search content"),
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
encoded_query = quote(input_data.query)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
results = self.get_request(jina_search_url, headers=headers, json=False)
# Output the search results
yield "results", results
class ExtractWebsiteContentBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
url: str = SchemaField(description="The URL to scrape the content from")
raw_content: bool = SchemaField(
default=False,
title="Raw Content",
description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
advanced=True,
)
class Output(BlockSchema):
content: str = SchemaField(description="The scraped content from the given URL")
error: str = SchemaField(
description="Error message if the content cannot be retrieved"
)
def __init__(self):
super().__init__(
id="436c3984-57fd-4b85-8e9a-459b356883bd",
description="This block scrapes the content from the given web URL.",
categories={BlockCategory.SEARCH},
input_schema=ExtractWebsiteContentBlock.Input,
output_schema=ExtractWebsiteContentBlock.Output,
test_input={
"url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=("content", "scraped content"),
test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
if input_data.raw_content:
url = input_data.url
headers = {}
else:
url = f"https://r.jina.ai/{input_data.url}"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
content = self.get_request(url, json=False, headers=headers)
yield "content", content
|
from groq._utils._utils import quote
from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
query: str = SchemaField(description="The search query to search the web for")
class Output(BlockSchema):
results: str = SchemaField(
description="The search results including content from top 5 URLs"
)
error: str = SchemaField(description="Error message if the search fails")
def __init__(self):
super().__init__(
id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
output_schema=SearchTheWebBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"query": "Artificial Intelligence",
},
test_credentials=TEST_CREDENTIALS,
test_output=("results", "search content"),
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
encoded_query = quote(input_data.query)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
results = self.get_request(jina_search_url, headers=headers, json=False)
# Output the search results
yield "results", results
|
import multiprocessing
import random
import time
from functools import partial
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.types.request.data import Response
NUM_REQUESTS = 5
class MyExecutor(Executor):
@requests(on='/ping')
def ping(self, **kwargs):
time.sleep(0.1 * random.random())
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('shards', [10])
@pytest.mark.parametrize('polling', ['ANY', 'ALL'])
@pytest.mark.parametrize('prefetch', [1, 10])
@pytest.mark.parametrize('concurrent', [15])
@pytest.mark.parametrize('use_stream', [False, True])
def test_concurrent_clients(
concurrent, protocol, shards, polling, prefetch, reraise, use_stream
):
if not use_stream and protocol != 'grpc':
return
def pong(peer_hash, queue, resp: Response):
for d in resp.docs:
queue.put((peer_hash, d.text))
def peer_client(port, protocol, peer_hash, queue):
c = Client(protocol=protocol, port=port)
for _ in range(NUM_REQUESTS):
c.post(
'/ping',
Document(text=peer_hash),
on_done=lambda r: pong(peer_hash, queue, r),
return_responses=True,
stream=use_stream,
)
f = Flow(protocol=protocol, prefetch=prefetch).add(
uses=MyExecutor, shards=shards, polling=polling
)
with f:
pqueue = multiprocessing.Queue()
port = f.port
process_pool = []
for peer_id in range(concurrent):
p = multiprocessing.Process(
target=partial(peer_client, port, protocol, str(peer_id), pqueue),
daemon=True,
)
p.start()
process_pool.append(p)
for p in process_pool:
p.join()
queue_len = 0
while not pqueue.empty():
peer_hash, text = pqueue.get()
assert peer_hash == text
queue_len += 1
assert queue_len == concurrent * NUM_REQUESTS
|
import multiprocessing
import random
import time
from functools import partial
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.types.request.data import Response
NUM_REQUESTS = 5
class MyExecutor(Executor):
@requests(on='/ping')
def ping(self, **kwargs):
time.sleep(0.1 * random.random())
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('shards', [10])
@pytest.mark.parametrize('polling', ['ANY', 'ALL'])
@pytest.mark.parametrize('prefetch', [1, 10])
@pytest.mark.parametrize('concurrent', [15])
@pytest.mark.parametrize('use_stream', [False, True])
def test_concurrent_clients(concurrent, protocol, shards, polling, prefetch, reraise, use_stream):
if not use_stream and protocol != 'grpc':
return
def pong(peer_hash, queue, resp: Response):
for d in resp.docs:
queue.put((peer_hash, d.text))
def peer_client(port, protocol, peer_hash, queue):
c = Client(protocol=protocol, port=port)
for _ in range(NUM_REQUESTS):
c.post(
'/ping',
Document(text=peer_hash),
on_done=lambda r: pong(peer_hash, queue, r),
return_responses=True,
stream=use_stream
)
f = Flow(protocol=protocol, prefetch=prefetch).add(
uses=MyExecutor, shards=shards, polling=polling
)
with f:
pqueue = multiprocessing.Queue()
port = f.port
process_pool = []
for peer_id in range(concurrent):
p = multiprocessing.Process(
target=partial(peer_client, port, protocol, str(peer_id), pqueue),
daemon=True,
)
p.start()
process_pool.append(p)
for p in process_pool:
p.join()
queue_len = 0
while not pqueue.empty():
peer_hash, text = pqueue.get()
assert peer_hash == text
queue_len += 1
assert queue_len == concurrent * NUM_REQUESTS
|
"""Gemini embeddings file."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativeai as gemini
@deprecated.deprecated(
reason=(
"Should use `llama-index-embeddings-google-genai` instead, using Google's latest unified SDK. "
"See: https://docs.llamaindex.ai/en/stable/examples/embeddings/google_genai/"
)
)
class GeminiEmbedding(BaseEmbedding):
"""
Google Gemini embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
title: Optional[str] = Field(
default="",
description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid.",
)
task_type: Optional[str] = Field(
default="retrieval_document",
description="The task for embedding model.",
)
def __init__(
self,
model_name: str = "models/embedding-001",
task_type: Optional[str] = "retrieval_document",
api_key: Optional[str] = None,
title: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
title=title,
task_type=task_type,
**kwargs,
)
gemini.configure(api_key=api_key)
self._model = gemini
@classmethod
def class_name(cls) -> str:
return "GeminiEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.embed_content(
model=self.model_name,
content=query,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return [
self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
for text in texts
]
### Async methods ###
    # Need to wait for async support to be implemented on the Gemini side.
# Issue: https://github.com/google/generative-ai-python/issues/125
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self._get_text_embedding(text)
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return self._get_text_embeddings(texts)
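# Hedged usage sketch (added, not in the original file); the API key below
# is a placeholder:
# embed_model = GeminiEmbedding(api_key="YOUR_API_KEY")
# vector = embed_model.get_text_embedding("hello world")
# print(len(vector))  # dimensionality of the returned embedding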
|
"""Gemini embeddings file."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativeai as gemini
@deprecated.deprecated(
reason=(
"Should use `llama-index-embeddings-google-genai` instead, using Google's latest unified SDK. "
"See: https://docs.llamaindex.ai/en/stable/examples/embeddings/google_genai/"
)
)
class GeminiEmbedding(BaseEmbedding):
"""Google Gemini embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
title: Optional[str] = Field(
default="",
description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid.",
)
task_type: Optional[str] = Field(
default="retrieval_document",
description="The task for embedding model.",
)
def __init__(
self,
model_name: str = "models/embedding-001",
task_type: Optional[str] = "retrieval_document",
api_key: Optional[str] = None,
title: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
title=title,
task_type=task_type,
**kwargs,
)
gemini.configure(api_key=api_key)
self._model = gemini
@classmethod
def class_name(cls) -> str:
return "GeminiEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.embed_content(
model=self.model_name,
content=query,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return [
self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
for text in texts
]
### Async methods ###
    # Need to wait for async support to be implemented on the Gemini side.
# Issue: https://github.com/google/generative-ai-python/issues/125
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self._get_text_embedding(text)
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return self._get_text_embeddings(texts)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=FlairTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=FlairTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
|
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
from .MarginMSELoss import MarginMSELoss
from .MatryoshkaLoss import MatryoshkaLoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MSELoss import MSELoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import SiameseDistanceMetric, ContrastiveLoss
from .ContrastiveTensionLoss import (
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
ContrastiveTensionDataLoader,
)
from .CoSENTLoss import CoSENTLoss
from .AnglELoss import AnglELoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
# Triplet losses
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .BatchAllTripletLoss import BatchAllTripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
from .MarginMSELoss import MarginMSELoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MSELoss import MSELoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import SiameseDistanceMetric, ContrastiveLoss
from .ContrastiveTensionLoss import (
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
ContrastiveTensionDataLoader,
)
from .CoSENTLoss import CoSENTLoss
from .AnglELoss import AnglELoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
# Triplet losses
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .BatchAllTripletLoss import BatchAllTripletLoss
__all__ = [
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = Requests().post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
with read_base():
from mmdet.configs.retinanet.retinanet_r50_caffe_fpn_1x_coco import *
from mmdet.configs.retinanet.retinanet_r101_caffe_fpn_1x_coco import \
model as r101
model = r101
|
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from mmdet.configs.retinanet.retinanet_r50_caffe_fpn_1x_coco import *
from mmdet.configs.retinanet.retinanet_r101_caffe_fpn_1x_coco import \
model as r101
model = r101
|
"""Init file of LlamaIndex."""
__version__ = "0.12.24.post1"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.24"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained Source Separation pipeline with *ConvTasNet*
:cite:`Luo_2019` trained on *Libri2Mix dataset* :cite:`cosentino2020librimix`.
The source separation model is constructed by :func:`~torchaudio.models.conv_tasnet_base`
and is trained using the training script ``lightning_train.py``
`here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained music source separation pipeline with
*Hybrid Demucs* :cite:`defossez2021hybrid` trained on both training and test sets of
MUSDB-HQ :cite:`MUSDB18HQ` and an additional 150 extra songs from an internal database
that was specifically produced for Meta.
The model is constructed by :func:`~torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB = SourceSeparationBundle(
_model_path="models/hdemucs_high_musdbhq_only.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained music source separation pipeline with
*Hybrid Demucs* :cite:`defossez2021hybrid` trained on the training set of MUSDB-HQ :cite:`MUSDB18HQ`.
The model is constructed by :func:`~torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained Source Separation pipeline with *ConvTasNet*
:cite:`Luo_2019` trained on *Libri2Mix dataset* :cite:`cosentino2020librimix`.
The source separation model is constructed by :func:`~torchaudio.models.conv_tasnet_base`
and is trained using the training script ``lightning_train.py``
`here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained music source separation pipeline with
*Hybrid Demucs* :cite:`defossez2021hybrid` trained on MUSDB-HQ :cite:`MUSDB18HQ`
and additional internal training data.
The model is constructed by :func:`~torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB = SourceSeparationBundle(
_model_path="models/hdemucs_high_musdbhq_only.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained music source separation pipeline with
*Hybrid Demucs* :cite:`defossez2021hybrid` trained on MUSDB-HQ :cite:`MUSDB18HQ`.
The model is constructed by :func:`~torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A Recursive Feature Elimination (RFE) example with automatic tuning of the
number of features selected with cross-validation.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We build a classification task using 3 informative features. The introduction
# of 2 additional redundant (i.e. correlated) features has the effect that the
# selected features vary depending on the cross-validation fold. The remaining
# features are non-informative as they are drawn at random.
from sklearn.datasets import make_classification
n_features = 15
feat_names = [f"feature_{i}" for i in range(15)]
X, y = make_classification(
n_samples=500,
n_features=n_features,
n_informative=3,
n_redundant=2,
n_repeated=0,
n_classes=8,
n_clusters_per_class=1,
class_sep=0.8,
random_state=0,
)
# %%
# Model training and selection
# ----------------------------
#
# We create the RFE object and compute the cross-validated scores. The scoring
# strategy "accuracy" optimizes the proportion of correctly classified samples.
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
min_features_to_select = 1 # Minimum number of features to consider
clf = LogisticRegression()
cv = StratifiedKFold(5)
rfecv = RFECV(
estimator=clf,
step=1,
cv=cv,
scoring="accuracy",
min_features_to_select=min_features_to_select,
n_jobs=2,
)
rfecv.fit(X, y)
print(f"Optimal number of features: {rfecv.n_features_}")
# %%
# In the present case, the model with 3 features (which corresponds to the true
# generative model) is found to be optimal.
#
# Plot number of features VS. cross-validation scores
# ---------------------------------------------------
import matplotlib.pyplot as plt
import pandas as pd
data = {
key: value
for key, value in rfecv.cv_results_.items()
if key in ["n_features", "mean_test_score", "std_test_score"]
}
cv_results = pd.DataFrame(data)
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Mean test accuracy")
plt.errorbar(
x=cv_results["n_features"],
y=cv_results["mean_test_score"],
yerr=cv_results["std_test_score"],
)
plt.title("Recursive Feature Elimination \nwith correlated features")
plt.show()
# %%
# From the plot above one can further notice a plateau of equivalent scores
# (similar mean value and overlapping errorbars) for 3 to 5 selected features.
# This is the result of introducing correlated features. Indeed, the optimal
# model selected by the RFE can lie within this range, depending on the
# cross-validation technique. The test accuracy decreases above 5 selected
# features; that is, keeping non-informative features leads to over-fitting and
# is therefore detrimental for the statistical performance of the models.
# %%
import numpy as np
for i in range(cv.n_splits):
mask = rfecv.cv_results_[f"split{i}_support"][
rfecv.n_features_
] # mask of features selected by the RFE
features_selected = np.ma.compressed(np.ma.masked_array(feat_names, mask=1 - mask))
print(f"Features selected in fold {i}: {features_selected}")
# %%
# In the five folds, the selected features are consistent. This is good news:
# it means that the selection is stable across folds, and it confirms that
# these features are the most informative ones.
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A Recursive Feature Elimination (RFE) example with automatic tuning of the
number of features selected with cross-validation.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We build a classification task using 3 informative features. The introduction
# of 2 additional redundant (i.e. correlated) features has the effect that the
# selected features vary depending on the cross-validation fold. The remaining
# features are non-informative as they are drawn at random.
from sklearn.datasets import make_classification
X, y = make_classification(
n_samples=500,
n_features=15,
n_informative=3,
n_redundant=2,
n_repeated=0,
n_classes=8,
n_clusters_per_class=1,
class_sep=0.8,
random_state=0,
)
# %%
# Model training and selection
# ----------------------------
#
# We create the RFE object and compute the cross-validated scores. The scoring
# strategy "accuracy" optimizes the proportion of correctly classified samples.
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
min_features_to_select = 1 # Minimum number of features to consider
clf = LogisticRegression()
cv = StratifiedKFold(5)
rfecv = RFECV(
estimator=clf,
step=1,
cv=cv,
scoring="accuracy",
min_features_to_select=min_features_to_select,
n_jobs=2,
)
rfecv.fit(X, y)
print(f"Optimal number of features: {rfecv.n_features_}")
# %%
# In the present case, the model with 3 features (which corresponds to the true
# generative model) is found to be optimal.
#
# Plot number of features VS. cross-validation scores
# ---------------------------------------------------
import matplotlib.pyplot as plt
import pandas as pd
cv_results = pd.DataFrame(rfecv.cv_results_)
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Mean test accuracy")
plt.errorbar(
x=cv_results["n_features"],
y=cv_results["mean_test_score"],
yerr=cv_results["std_test_score"],
)
plt.title("Recursive Feature Elimination \nwith correlated features")
plt.show()
# %%
# From the plot above one can further notice a plateau of equivalent scores
# (similar mean value and overlapping errorbars) for 3 to 5 selected features.
# This is the result of introducing correlated features. Indeed, the optimal
# model selected by the RFE can lie within this range, depending on the
# cross-validation technique. The test accuracy decreases above 5 selected
# features; that is, keeping non-informative features leads to over-fitting and
# is therefore detrimental for the statistical performance of the models.
|
_base_ = './cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py'
lang_model_name = 'bert-base-uncased'
model = dict(bbox_head=dict(early_fuse=True))
dataset_type = 'Flickr30kDataset'
data_root = 'data/flickr30k_entities/'
test_pipeline = [
dict(
type='LoadImageFromFile', backend_args=None,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities',
'tokens_positive', 'phrase_ids', 'phrases'))
]
dataset_Flickr30k_val = dict(
type=dataset_type,
data_root=data_root,
ann_file='final_flickr_separateGT_val.json',
data_prefix=dict(img='flickr30k_images/'),
pipeline=test_pipeline,
)
dataset_Flickr30k_test = dict(
type=dataset_type,
data_root=data_root,
ann_file='final_flickr_separateGT_test.json',
data_prefix=dict(img='flickr30k_images/'),
pipeline=test_pipeline,
)
val_evaluator_Flickr30k = dict(type='Flickr30kMetric', )
test_evaluator_Flickr30k = dict(type='Flickr30kMetric', )
# ----------Config---------- #
dataset_prefixes = ['Flickr30kVal', 'Flickr30kTest']
datasets = [dataset_Flickr30k_val, dataset_Flickr30k_test]
metrics = [val_evaluator_Flickr30k, test_evaluator_Flickr30k]
val_dataloader = dict(
dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='MultiDatasetsEvaluator',
metrics=metrics,
dataset_prefixes=dataset_prefixes)
test_evaluator = val_evaluator
|
_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py'
lang_model_name = 'bert-base-uncased'
model = dict(bbox_head=dict(early_fuse=True))
dataset_type = 'Flickr30kDataset'
data_root = 'data/flickr30k/'
test_pipeline = [
dict(
type='LoadImageFromFile', backend_args=None,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities',
'tokens_positive', 'phrase_ids', 'phrases'))
]
dataset_Flickr30k_val = dict(
type=dataset_type,
data_root=data_root,
ann_file='mdetr_annotations/final_flickr_separateGT_val.json',
data_prefix=dict(img='flickr30k_images/'),
pipeline=test_pipeline,
)
dataset_Flickr30k_test = dict(
type=dataset_type,
data_root=data_root,
ann_file='mdetr_annotations/final_flickr_separateGT_test.json',
data_prefix=dict(img='flickr30k_images/'),
pipeline=test_pipeline,
)
val_evaluator_Flickr30k = dict(type='Flickr30kMetric', )
test_evaluator_Flickr30k = dict(type='Flickr30kMetric', )
# ----------Config---------- #
dataset_prefixes = ['Flickr30kVal', 'Flickr30kTest']
datasets = [dataset_Flickr30k_val, dataset_Flickr30k_test]
metrics = [val_evaluator_Flickr30k, test_evaluator_Flickr30k]
val_dataloader = dict(
dataset=dict(_delete_=True, type='ConcatDataset', datasets=datasets))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='MultiDatasetsEvaluator',
metrics=metrics,
dataset_prefixes=dataset_prefixes)
test_evaluator = val_evaluator
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
def bbox_overlaps(bboxes1,
bboxes2,
mode='iou',
eps=1e-6,
use_legacy_coordinate=False):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1 (ndarray): Shape (n, 4)
bboxes2 (ndarray): Shape (k, 4)
mode (str): IOU (intersection over union) or IOF (intersection
over foreground)
use_legacy_coordinate (bool): Whether to use coordinate system in
            mmdet v1.x, which means width and height should be
            calculated as 'x2 - x1 + 1' and 'y2 - y1 + 1' respectively.
            Note that when this function is used in `VOCDataset`, it should be
True to align with the official implementation
`http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar`
Default: False.
Returns:
ious (ndarray): Shape (n, k)
"""
assert mode in ['iou', 'iof']
if not use_legacy_coordinate:
extra_length = 0.
else:
extra_length = 1.
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * (
bboxes1[:, 3] - bboxes1[:, 1] + extra_length)
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * (
bboxes2[:, 3] - bboxes2[:, 1] + extra_length)
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum(
y_end - y_start + extra_length, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
union = np.maximum(union, eps)
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
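# --- Hedged usage sketch (illustrative addition, not part of the upstream file).
# Boxes are in (x1, y1, x2, y2) format; with the default coordinate system the
# second pair below shares an intersection of 50 over a union of 150, i.e. IoU 1/3.
if __name__ == '__main__':
    boxes_a = np.array([[0., 0., 10., 10.]])
    boxes_b = np.array([[0., 0., 10., 10.], [5., 0., 15., 10.]])
    print(bbox_overlaps(boxes_a, boxes_b, mode='iou'))  # [[1.0, 0.3333]]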
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1(ndarray): shape (n, 4)
bboxes2(ndarray): shape (k, 4)
mode(str): iou (intersection over union) or iof (intersection
over foreground)
Returns:
ious(ndarray): shape (n, k)
"""
assert mode in ['iou', 'iof']
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start, 0) * np.maximum(
y_end - y_start, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
union = np.maximum(union, eps)
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[AudioBytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.zeros(1000, 2),
)
# doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
# doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[AudioBytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.zeros(1000, 2),
)
doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
    'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import urllib
import warnings
from typing import Tuple, Union
import torch
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
from mmengine.utils import scandir
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update cfg.data_root
    accordingly. Otherwise, cfg.data_root is used as default.
Args:
        cfg (:obj:`Config`): The model config to be modified.
        logger (logging.Logger | str | None): The way to print messages.
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
def get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:
"""Get the test dataset pipeline from entire config.
Args:
cfg (str or :obj:`ConfigDict`): the entire config. Can be a config
file or a ``ConfigDict``.
Returns:
:obj:`ConfigDict`: the config of test dataset.
"""
if isinstance(cfg, str):
cfg = Config.fromfile(cfg)
def _get_test_pipeline_cfg(dataset_cfg):
if 'pipeline' in dataset_cfg:
return dataset_cfg.pipeline
# handle dataset wrapper
elif 'dataset' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.dataset)
# handle dataset wrappers like ConcatDataset
elif 'datasets' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.datasets[0])
raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')
return _get_test_pipeline_cfg(cfg.test_dataloader.dataset)
def get_file_list(source_root: str) -> Tuple[list, dict]:
"""Get file list.
Args:
source_root (str): image or video source path
    Returns:
        source_file_path_list (list): A list of all source files.
source_type (dict): Source type: file or url or dir.
"""
is_dir = os.path.isdir(source_root)
is_url = source_root.startswith(('http:/', 'https:/'))
is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS
source_file_path_list = []
if is_dir:
# when input source is dir
for file in scandir(source_root, IMG_EXTENSIONS, recursive=True):
source_file_path_list.append(os.path.join(source_root, file))
elif is_url:
# when input source is url
filename = os.path.basename(
urllib.parse.unquote(source_root).split('?')[0])
file_save_path = os.path.join(os.getcwd(), filename)
print(f'Downloading source file to {file_save_path}')
torch.hub.download_url_to_file(source_root, file_save_path)
source_file_path_list = [file_save_path]
elif is_file:
# when input source is single image
source_file_path_list = [source_root]
else:
print('Cannot find image file.')
source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file)
return source_file_path_list, source_type
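# --- Hedged usage sketch (illustrative addition, not part of the upstream file).
# Paths below are placeholders; find_latest_checkpoint picks the *.pth file with
# the highest trailing iteration/epoch number, and get_file_list expands a
# directory, URL, or single image into a list of local file paths.
if __name__ == '__main__':
    ckpt = find_latest_checkpoint('work_dirs/my_experiment')
    print('latest checkpoint:', ckpt)  # None if no *.pth files exist there
    files, source_type = get_file_list('demo/')
    print(len(files), source_type)  # e.g. 2 {'is_dir': True, 'is_url': False, 'is_file': False}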
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
from typing import Union
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update cfg.data_root
    accordingly. Otherwise, cfg.data_root is used as default.
Args:
        cfg (:obj:`Config`): The model config to be modified.
        logger (logging.Logger | str | None): The way to print messages.
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
def get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:
"""Get the test dataset pipeline from entire config.
Args:
cfg (str or :obj:`ConfigDict`): the entire config. Can be a config
file or a ``ConfigDict``.
Returns:
:obj:`ConfigDict`: the config of test dataset.
"""
if isinstance(cfg, str):
cfg = Config.fromfile(cfg)
def _get_test_pipeline_cfg(dataset_cfg):
if 'pipeline' in dataset_cfg:
return dataset_cfg.pipeline
# handle dataset wrapper
elif 'dataset' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.dataset)
# handle dataset wrappers like ConcatDataset
elif 'datasets' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.datasets[0])
raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')
return _get_test_pipeline_cfg(cfg.test_dataloader.dataset)
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example documents
corpus = [
"Machine learning is a field of study that gives computers the ability to learn without being explicitly programmed.",
"Deep learning is part of a broader family of machine learning methods based on artificial neural networks with representation learning.",
"Neural networks are computing systems vaguely inspired by the biological neural networks that constitute animal brains.",
"Mars rovers are robotic vehicles designed to travel on the surface of Mars to collect data and perform experiments.",
"The James Webb Space Telescope is the largest optical telescope in space, designed to conduct infrared astronomy.",
"SpaceX's Starship is designed to be a fully reusable transportation system capable of carrying humans to Mars and beyond.",
"Global warming is the long-term heating of Earth's climate system observed since the pre-industrial period due to human activities.",
"Renewable energy sources include solar, wind, hydro, and geothermal power that naturally replenish over time.",
"Carbon capture technologies aim to collect CO2 emissions before they enter the atmosphere and store them underground.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode_document(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"How do artificial neural networks work?",
"What technology is used for modern space exploration?",
"How can we address climate change challenges?",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode_query(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(f"(Score: {score:.4f})", corpus[idx])
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the 5 closest sentences in the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], f"(Score: {score:.4f})")
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + top-k
# (requires: from sentence_transformers import util)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0]  # Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should be larger than 0
HourglassNet(num_stacks=0)
with pytest.raises(AssertionError):
# len(stage_channels) should equal len(stage_blocks)
HourglassNet(
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2, 4])
with pytest.raises(AssertionError):
        # len(stage_channels) should be larger than downsample_times
HourglassNet(
downsample_times=5,
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2])
# Test HourglassNet-52
model = HourglassNet(
num_stacks=1,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([1, 64, 64, 64])
# Test HourglassNet-104
model = HourglassNet(
num_stacks=2,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size([1, 64, 64, 64])
assert feat[1].shape == torch.Size([1, 64, 64, 64])
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should be larger than 0
HourglassNet(num_stacks=0)
with pytest.raises(AssertionError):
# len(stage_channels) should equal len(stage_blocks)
HourglassNet(
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2, 4])
with pytest.raises(AssertionError):
        # len(stage_channels) should be larger than downsample_times
HourglassNet(
downsample_times=5,
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2])
# Test HourglassNet-52
model = HourglassNet(num_stacks=1)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([1, 256, 64, 64])
# Test HourglassNet-104
model = HourglassNet(num_stacks=2)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size([1, 256, 64, 64])
assert feat[1].shape == torch.Size([1, 256, 64, 64])
|
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
HunyuanDiT2DControlNetModel,
HunyuanDiT2DMultiControlNetModel,
)
from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
from .controlnet_sparsectrl import (
SparseControlNetConditioningEmbedding,
SparseControlNetModel,
SparseControlNetOutput,
)
from .controlnet_union import ControlNetUnionInput, ControlNetUnionInputProMax, ControlNetUnionModel
from .controlnet_xs import ControlNetXSAdapter, ControlNetXSOutput, UNetControlNetXSModel
from .multicontrolnet import MultiControlNetModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
|
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
HunyuanDiT2DControlNetModel,
HunyuanDiT2DMultiControlNetModel,
)
from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
from .controlnet_sparsectrl import (
SparseControlNetConditioningEmbedding,
SparseControlNetModel,
SparseControlNetOutput,
)
from .controlnet_xs import ControlNetXSAdapter, ControlNetXSOutput, UNetControlNetXSModel
from .multicontrolnet import MultiControlNetModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
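        # Checkpoint files are assumed to be named like `epoch_12.pth`; the
        # trailing integer is parsed out and used as the recency key.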
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update cfg.data_root
    accordingly; otherwise keep cfg.data_root as the default.
    Args:
        cfg (:obj:`Config`): The model config to modify.
        logger (logging.Logger | str | None): The way to print messages.
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
                  f'Using {dst_root} as data root.')
else:
return
def update(cfg, src_str, dst_str):
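        # Recursively rewrite every string value containing src_str, descending
        # into nested ConfigDicts.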
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update cfg.data_root
    accordingly; otherwise keep cfg.data_root as the default.
    Args:
        cfg (:obj:`Config`): The model config to modify.
        logger (logging.Logger | str | None): The way to print messages.
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
                  f'Using {dst_root} as data root.')
else:
return
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
def add_dump_metric(args, cfg):
dump_metric = dict(type='DumpResults', out_file_path=args.out)
if isinstance(cfg.test_evaluator, (list, tuple)):
        cfg.test_evaluator = list(cfg.test_evaluator) + [dump_metric]
elif isinstance(cfg.test_evaluator, dict):
if isinstance(cfg.test_evaluator.metric, str):
cfg.test_evaluator = [cfg.test_evaluator, dump_metric]
elif isinstance(cfg.test_evaluator.metric, (list, tuple)):
            cfg.test_evaluator.metric = list(
                cfg.test_evaluator.metric) + [dump_metric]
else:
cfg.test_evaluator.metric = [
cfg.test_evaluator.metric, dump_metric
]
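# Hedged usage sketch (a hypothetical `args` object with an `out` path; not
# part of this module):
#   cfg = Config.fromfile('configs/retinanet/retinanet_r50_fpn_1x_coco.py')
#   add_dump_metric(args, cfg)
#   # cfg.test_evaluator now also contains a DumpResults metric writing to args.out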
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
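        # Route archive members for the Demultiplexer below: 0 -> images,
        # 1 -> scene annotations, None -> dropped (drop_none=True).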
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from torchvision.prototype.features import EncodedImage, Label
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestSingleStageInstanceSegmentor(TestCase):
@parameterized.expand([
'solo/solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_light_r50_fpn_3x_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.mask_head)
self.assertEqual(detector.device.type, 'cpu')
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet-18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet-18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestSingleStageInstanceSegmentor(TestCase):
@parameterized.expand([
'solo/solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_light_r50_fpn_3x_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.mask_head)
self.assertEqual(detector.device.type, 'cpu')
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet-18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet-18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, TransformersTokenizerWrapper, WordTokenizer
__all__ = [
"WordTokenizer",
"WhitespaceTokenizer",
"PhraseTokenizer",
"ENGLISH_STOP_WORDS",
"TransformersTokenizerWrapper",
]
|
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
# optimizer
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))
|
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
# optimizer
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))
optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))
|
from typing import Any, Optional, Type, TypeVar, Union
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [`TextUrl`][docarray.typing.url.TextUrl] (`TextDoc.url`)
- a `str` (`TextDoc.text`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`TextDoc.embedding`)
- a `bytes` object (`TextDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='https://www.gutenberg.org/files/1065/1065-0.txt')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
```
You can initialize directly from a string:
```python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
```
You can extend this Document:
```python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='https://www.gutenberg.org/files/1065/1065-0.txt')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
# txt_doc.second_embedding = model(txt_doc.text)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text_doc=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image_doc.tensor = mmdoc.image_doc.url.load()
# or
mmdoc.image_doc.bytes_ = mmdoc.image_doc.url.load_bytes()
mmdoc.image_doc.tensor = mmdoc.image_doc.bytes_.load()
```
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
excluding `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
```python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com/file')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com/file')
doc == 'This is the main text' # True
doc == doc2 # True
```
"""
text: Optional[str] = Field(
description='The text content stored in the document',
example='This is an example text content of the document',
default=None,
)
url: Optional[TextUrl] = Field(
description='URL to a (potentially remote) text file that can be loaded',
example='https://www.w3.org/History/19921103-hypertext/hypertext/README.html',
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the text',
example=[1, 0, 1],
default=None,
)
bytes_: Optional[bytes] = Field(
description='Bytes representation of the text',
default=None,
)
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
        This method makes `TextDoc` behave like a `str`.
        :param item: A string to check for as a substring of the `text` attribute
        :return: A boolean indicating whether `item` is a substring of `text`
```python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
```
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [`TextUrl`][docarray.typing.url.TextUrl] (`TextDoc.url`)
- a `str` (`TextDoc.text`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`TextDoc.embedding`)
- a `bytes` object (`TextDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='https://www.gutenberg.org/files/1065/1065-0.txt')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
```
You can initialize directly from a string:
```python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
```
You can extend this Document:
```python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='https://www.gutenberg.org/files/1065/1065-0.txt')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
# txt_doc.second_embedding = model(txt_doc.text)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text_doc=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image_doc.tensor = mmdoc.image_doc.url.load()
# or
mmdoc.image_doc.bytes_ = mmdoc.image_doc.url.load_bytes()
mmdoc.image_doc.tensor = mmdoc.image_doc.bytes_.load()
```
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
excluding `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
```python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com/file')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com/file')
doc == 'This is the main text' # True
doc == doc2 # True
```
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
bytes_: Optional[bytes] = None
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
        This method makes `TextDoc` behave like a `str`.
        :param item: A string to check for as a substring of the `text` attribute
        :return: A boolean indicating whether `item` is a substring of `text`
```python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
```
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, DocumentArray
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def data_generator(test_dir: str):
def _generator():
data_file_path = os.path.join(test_dir, 'test_data', 'test_data.txt')
with open(data_file_path, 'r') as file:
lines = file.readlines()
for line in lines:
yield Document(text=line.strip())
return _generator
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([
Document(text='hello world') for _ in range(10)
])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
return DocumentArray([
Document(
chunks=[Document(text='hello world') for _ in range(10)]
)
])
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
return DocumentArray([
Document(
chunks=[Document(
chunks=[Document(text='hello world') for _ in range(10)])])
])
|
AMI_ID = {
# Managed by XGBoost team
"linux-amd64-gpu": {
"us-west-2": "ami-08c3bc1dd5ec8bc5c",
},
"linux-amd64-mgpu": {
"us-west-2": "ami-08c3bc1dd5ec8bc5c",
},
"windows-gpu": {
"us-west-2": "ami-03c7f2156f93b22a7",
},
"windows-cpu": {
"us-west-2": "ami-03c7f2156f93b22a7",
},
# Managed by BuildKite
# from https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml
"linux-amd64-cpu": {
"us-west-2": "ami-015e64acb52b3e595",
},
"pipeline-loader": {
"us-west-2": "ami-015e64acb52b3e595",
},
"linux-arm64-cpu": {
"us-west-2": "ami-0884e9c23a2fa98d0",
},
}
STACK_PARAMS = {
"linux-amd64-gpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "g4dn.xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "8",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-amd64-mgpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "g4dn.12xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "1",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"windows-gpu": {
"InstanceOperatingSystem": "windows",
"InstanceTypes": "g4dn.2xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"windows-cpu": {
"InstanceOperatingSystem": "windows",
"InstanceTypes": "c5a.2xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-amd64-cpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "c5a.4xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "16",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"pipeline-loader": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "t3a.micro",
"AgentsPerInstance": "1",
"MinSize": "2",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-arm64-cpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "c6g.4xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "8",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
}
COMMON_STACK_PARAMS = {
"BuildkiteAgentTimestampLines": "false",
"BuildkiteWindowsAdministrator": "true",
"AssociatePublicIpAddress": "true",
"ScaleOutForWaitingJobs": "false",
"EnableCostAllocationTags": "true",
"CostAllocationTagName": "CreatedBy",
"ECRAccessPolicy": "full",
"EnableSecretsPlugin": "false",
"EnableECRPlugin": "false",
"EnableDockerLoginPlugin": "false",
"EnableDockerUserNamespaceRemap": "false",
"BuildkiteAgentExperiments": "normalised-upload-paths,resolve-commit-after-checkout",
}
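# Hedged sketch: one plausible way these dicts could be fed to CloudFormation
# via boto3. The boto3 calls are real APIs, but the stack name and the
# "ImageId" parameter key are assumptions about the BuildKite template, not
# facts taken from this module.
# import boto3
# cf = boto3.client("cloudformation", region_name="us-west-2")
# params = {**STACK_PARAMS["linux-amd64-cpu"], **COMMON_STACK_PARAMS,
#           "ImageId": AMI_ID["linux-amd64-cpu"]["us-west-2"]}
# cf.create_stack(
#     StackName="buildkite-linux-amd64-cpu",
#     TemplateURL="https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml",
#     Parameters=[{"ParameterKey": k, "ParameterValue": str(v)}
#                 for k, v in params.items()],
# )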
|
AMI_ID = {
# Managed by XGBoost team
"linux-amd64-gpu": {
"us-west-2": "ami-094271bed4788ddb5",
},
"linux-amd64-mgpu": {
"us-west-2": "ami-094271bed4788ddb5",
},
"windows-gpu": {
"us-west-2": "ami-0839681594a1d7627",
},
"windows-cpu": {
"us-west-2": "ami-0839681594a1d7627",
},
# Managed by BuildKite
# from https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml
"linux-amd64-cpu": {
"us-west-2": "ami-00f2127550cf03658",
},
"pipeline-loader": {
"us-west-2": "ami-00f2127550cf03658",
},
"linux-arm64-cpu": {
"us-west-2": "ami-0c5789068f4a2d1b5",
},
}
STACK_PARAMS = {
"linux-amd64-gpu": {
"InstanceOperatingSystem": "linux",
"InstanceType": "g4dn.xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "8",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-amd64-mgpu": {
"InstanceOperatingSystem": "linux",
"InstanceType": "g4dn.12xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "1",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"windows-gpu": {
"InstanceOperatingSystem": "windows",
"InstanceType": "g4dn.2xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"windows-cpu": {
"InstanceOperatingSystem": "windows",
"InstanceType": "c5a.2xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-amd64-cpu": {
"InstanceOperatingSystem": "linux",
"InstanceType": "c5a.4xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "16",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"pipeline-loader": {
"InstanceOperatingSystem": "linux",
"InstanceType": "t3a.micro",
"AgentsPerInstance": "1",
"MinSize": "2",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-arm64-cpu": {
"InstanceOperatingSystem": "linux",
"InstanceType": "c6g.4xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "8",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
}
COMMON_STACK_PARAMS = {
"BuildkiteAgentTimestampLines": "false",
"BuildkiteWindowsAdministrator": "true",
"AssociatePublicIpAddress": "true",
"ScaleOutForWaitingJobs": "false",
"EnableCostAllocationTags": "true",
"CostAllocationTagName": "CreatedBy",
"ECRAccessPolicy": "full",
"EnableSecretsPlugin": "false",
"EnableECRPlugin": "false",
"EnableDockerLoginPlugin": "false",
"EnableDockerUserNamespaceRemap": "false",
"BuildkiteAgentExperiments": "normalised-upload-paths,resolve-commit-after-checkout",
}
|
import builtins
import json
from enum import Enum
from typing import List, Optional, Type, Union
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool
class AppOperationType(str, Enum):
"""Type of app operation as enumerator."""
SET_ADMIN = "SET_ADMIN"
GET_CONFIG = "GET_CONFIG"
class AppSchema(BaseModel):
"""Schema for app operations."""
type: AppOperationType = Field(...)
appName: str = Field(..., description="Name of the application on the blockchain")
address: Optional[Union[str, List[str]]] = Field(
None,
description=(
"A single address or a list of addresses. Default: current session's "
"address"
),
)
class AINAppOps(AINBaseTool):
"""Tool for app operations."""
name: str = "AINappOps"
description: str = """
Create an app in the AINetwork Blockchain database by creating the /apps/<appName> path.
An address set as `admin` can grant `owner` rights to other addresses (refer to `AINownerOps` for more details).
Also, `admin` is initialized to have all `owner` permissions and `rule` allowed for that path.
## appName Rule
- [a-z_0-9]+
## address Rules
- 0x[0-9a-fA-F]{40}
- Defaults to the current session's address
- Multiple addresses can be specified if needed
## SET_ADMIN Example 1
- type: SET_ADMIN
- appName: ain_project
### Result:
1. Path /apps/ain_project created.
2. Current session's address registered as admin.
## SET_ADMIN Example 2
- type: SET_ADMIN
- appName: test_project
- address: [<address1>, <address2>]
### Result:
1. Path /apps/test_project created.
2. <address1> and <address2> registered as admin.
""" # noqa: E501
args_schema: Type[BaseModel] = AppSchema
async def _arun(
self,
type: AppOperationType,
appName: str,
address: Optional[Union[str, List[str]]] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
from ain.types import ValueOnlyTransactionInput
from ain.utils import getTimestamp
try:
if type is AppOperationType.SET_ADMIN:
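                # SET_ADMIN: register the given (or current session's)
                # addresses as app admins by writing to
                # /manage_app/<appName>/create/<timestamp>.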
if address is None:
address = self.interface.wallet.defaultAccount.address
if isinstance(address, str):
address = [address]
res = await self.interface.db.ref(
f"/manage_app/{appName}/create/{getTimestamp()}"
).setValue(
transactionInput=ValueOnlyTransactionInput(
                        value={"admin": {addr: True for addr in address}}
)
)
elif type is AppOperationType.GET_CONFIG:
res = await self.interface.db.ref(
f"/manage_app/{appName}/config"
).getValue()
else:
raise ValueError(f"Unsupported 'type': {type}.")
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{builtins.type(e).__name__}: {str(e)}"
|
import builtins
import json
from enum import Enum
from typing import List, Optional, Type, Union
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool
class AppOperationType(str, Enum):
"""Type of app operation as enumerator."""
SET_ADMIN = "SET_ADMIN"
GET_CONFIG = "GET_CONFIG"
class AppSchema(BaseModel):
"""Schema for app operations."""
type: AppOperationType = Field(...)
appName: str = Field(..., description="Name of the application on the blockchain")
address: Optional[Union[str, List[str]]] = Field(
None,
description=(
"A single address or a list of addresses. Default: current session's "
"address"
),
)
class AINAppOps(AINBaseTool): # type: ignore[override, override]
"""Tool for app operations."""
name: str = "AINappOps"
description: str = """
Create an app in the AINetwork Blockchain database by creating the /apps/<appName> path.
An address set as `admin` can grant `owner` rights to other addresses (refer to `AINownerOps` for more details).
Also, `admin` is initialized to have all `owner` permissions and `rule` allowed for that path.
## appName Rule
- [a-z_0-9]+
## address Rules
- 0x[0-9a-fA-F]{40}
- Defaults to the current session's address
- Multiple addresses can be specified if needed
## SET_ADMIN Example 1
- type: SET_ADMIN
- appName: ain_project
### Result:
1. Path /apps/ain_project created.
2. Current session's address registered as admin.
## SET_ADMIN Example 2
- type: SET_ADMIN
- appName: test_project
- address: [<address1>, <address2>]
### Result:
1. Path /apps/test_project created.
2. <address1> and <address2> registered as admin.
""" # noqa: E501
args_schema: Type[BaseModel] = AppSchema
async def _arun(
self,
type: AppOperationType,
appName: str,
address: Optional[Union[str, List[str]]] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
from ain.types import ValueOnlyTransactionInput
from ain.utils import getTimestamp
try:
if type is AppOperationType.SET_ADMIN:
if address is None:
address = self.interface.wallet.defaultAccount.address
if isinstance(address, str):
address = [address]
res = await self.interface.db.ref(
f"/manage_app/{appName}/create/{getTimestamp()}"
).setValue(
transactionInput=ValueOnlyTransactionInput(
                        value={"admin": {addr: True for addr in address}}
)
)
elif type is AppOperationType.GET_CONFIG:
res = await self.interface.db.ref(
f"/manage_app/{appName}/config"
).getValue()
else:
raise ValueError(f"Unsupported 'type': {type}.")
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{builtins.type(e).__name__}: {str(e)}"
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Literal
from pydantic import BaseModel, ConfigDict, SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
CredentialsField,
CredentialsMetaInput,
SchemaField,
UserPasswordCredentials,
)
from backend.integrations.providers import ProviderName
TEST_CREDENTIALS = UserPasswordCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="smtp",
username=SecretStr("mock-smtp-username"),
password=SecretStr("mock-smtp-password"),
title="Mock SMTP credentials",
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
SMTPCredentials = UserPasswordCredentials
SMTPCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.SMTP],
Literal["user_password"],
]
def SMTPCredentialsField() -> SMTPCredentialsInput:
return CredentialsField(
description="The SMTP integration requires a username and password.",
)
class SMTPConfig(BaseModel):
smtp_server: str = SchemaField(
default="smtp.example.com", description="SMTP server address"
)
smtp_port: int = SchemaField(default=25, description="SMTP port number")
model_config = ConfigDict(title="SMTP Config")
class SendEmailBlock(Block):
class Input(BlockSchema):
to_email: str = SchemaField(
description="Recipient email address", placeholder="[email protected]"
)
subject: str = SchemaField(
description="Subject of the email", placeholder="Enter the email subject"
)
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
config: SMTPConfig = SchemaField(
description="SMTP Config",
default=SMTPConfig(),
)
credentials: SMTPCredentialsInput = SMTPCredentialsField()
class Output(BlockSchema):
status: str = SchemaField(description="Status of the email sending operation")
error: str = SchemaField(
description="Error message if the email sending failed"
)
def __init__(self):
super().__init__(
id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
input_schema=SendEmailBlock.Input,
output_schema=SendEmailBlock.Output,
test_input={
"to_email": "[email protected]",
"subject": "Test Email",
"body": "This is a test email.",
"config": {
"smtp_server": "smtp.gmail.com",
"smtp_port": 25,
},
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[("status", "Email sent successfully")],
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
)
@staticmethod
def send_email(
config: SMTPConfig,
to_email: str,
subject: str,
body: str,
credentials: SMTPCredentials,
) -> str:
smtp_server = config.smtp_server
smtp_port = config.smtp_port
smtp_username = credentials.username.get_secret_value()
smtp_password = credentials.password.get_secret_value()
msg = MIMEMultipart()
msg["From"] = smtp_username
msg["To"] = to_email
msg["Subject"] = subject
msg.attach(MIMEText(body, "plain"))
with smtplib.SMTP(smtp_server, smtp_port) as server:
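            # Upgrade the connection with STARTTLS before authenticating; this
            # assumes the server supports it on the configured port (port 25
            # is plaintext by default).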
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(smtp_username, to_email, msg.as_string())
return "Email sent successfully"
async def run(
self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
) -> BlockOutput:
yield "status", self.send_email(
config=input_data.config,
to_email=input_data.to_email,
subject=input_data.subject,
body=input_data.body,
credentials=credentials,
)
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Literal
from pydantic import BaseModel, ConfigDict, SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
CredentialsField,
CredentialsMetaInput,
SchemaField,
UserPasswordCredentials,
)
from backend.integrations.providers import ProviderName
TEST_CREDENTIALS = UserPasswordCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="smtp",
username=SecretStr("mock-smtp-username"),
password=SecretStr("mock-smtp-password"),
title="Mock SMTP credentials",
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
SMTPCredentials = UserPasswordCredentials
SMTPCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.SMTP],
Literal["user_password"],
]
def SMTPCredentialsField() -> SMTPCredentialsInput:
return CredentialsField(
description="The SMTP integration requires a username and password.",
)
class SMTPConfig(BaseModel):
smtp_server: str = SchemaField(
default="smtp.example.com", description="SMTP server address"
)
smtp_port: int = SchemaField(default=25, description="SMTP port number")
model_config = ConfigDict(title="SMTP Config")
class SendEmailBlock(Block):
class Input(BlockSchema):
to_email: str = SchemaField(
description="Recipient email address", placeholder="[email protected]"
)
subject: str = SchemaField(
description="Subject of the email", placeholder="Enter the email subject"
)
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
config: SMTPConfig = SchemaField(
description="SMTP Config",
default=SMTPConfig(),
)
credentials: SMTPCredentialsInput = SMTPCredentialsField()
class Output(BlockSchema):
status: str = SchemaField(description="Status of the email sending operation")
error: str = SchemaField(
description="Error message if the email sending failed"
)
def __init__(self):
super().__init__(
id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
input_schema=SendEmailBlock.Input,
output_schema=SendEmailBlock.Output,
test_input={
"to_email": "[email protected]",
"subject": "Test Email",
"body": "This is a test email.",
"config": {
"smtp_server": "smtp.gmail.com",
"smtp_port": 25,
},
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[("status", "Email sent successfully")],
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
)
@staticmethod
def send_email(
config: SMTPConfig,
to_email: str,
subject: str,
body: str,
credentials: SMTPCredentials,
) -> str:
smtp_server = config.smtp_server
smtp_port = config.smtp_port
smtp_username = credentials.username.get_secret_value()
smtp_password = credentials.password.get_secret_value()
msg = MIMEMultipart()
msg["From"] = smtp_username
msg["To"] = to_email
msg["Subject"] = subject
msg.attach(MIMEText(body, "plain"))
with smtplib.SMTP(smtp_server, smtp_port) as server:
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(smtp_username, to_email, msg.as_string())
return "Email sent successfully"
def run(
self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
) -> BlockOutput:
yield "status", self.send_email(
config=input_data.config,
to_email=input_data.to_email,
subject=input_data.subject,
body=input_data.body,
credentials=credentials,
)
|
_base_ = './fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = [
'../_base_/models/cascade-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
pretrained='torchvision://resnet50',
style='pytorch')))
|
_base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
pretrained='torchvision://resnet50',
style='pytorch')))
|
# Copyright (c) OpenMMLab. All rights reserved.
_base_ = [
'mmdet::_base_/models/faster-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_detection.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
_base_ = [
'mmdet::_base_/models/faster_rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_detection.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
|
import math
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck
from torchaudio_unittest.common_utils import TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
numel = math.prod(shape)
# use 1.9 instead of 2 so as to include values above the Nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
def test_filter_waveform(self):
waveform = torch.rand(3, 1, 2, 10, device=self.device, dtype=self.dtype, requires_grad=True)
filters = torch.rand(3, 2, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.filter_waveform, (waveform, filters))
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck
from torchaudio_unittest.common_utils import TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
# use 1.9 instead of 2 so as to include values above the Nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
def test_filter_waveform(self):
waveform = torch.rand(3, 1, 2, 10, device=self.device, dtype=self.dtype, requires_grad=True)
filters = torch.rand(3, 2, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.filter_waveform, (waveform, filters))
|
#!/usr/bin/env python3
# Write the available versions page (--rst) and the version switcher JSON (--json).
# Version switcher see:
# https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/version-dropdown.html
# https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/announcements.html#announcement-banners
import argparse
import json
import re
import sys
from urllib.request import urlopen
from sklearn.utils.fixes import parse_version
def json_urlread(url):
try:
return json.loads(urlopen(url).read().decode("utf8"))
except Exception:
print("Error reading", url, file=sys.stderr)
raise
def human_readable_data_quantity(quantity, multiple=1024):
# https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
if quantity == 0:
quantity = +0
SUFFIXES = ["B"] + [i + {1000: "B", 1024: "iB"}[multiple] for i in "KMGTPEZY"]
for suffix in SUFFIXES:
if quantity < multiple or suffix == SUFFIXES[-1]:
if suffix == SUFFIXES[0]:
return "%d %s" % (quantity, suffix)
else:
return "%.1f %s" % (quantity, suffix)
else:
quantity /= multiple
def get_file_extension(version):
if "dev" in version:
# The 'dev' branch should be explicitly handled
return "zip"
current_version = parse_version(version)
min_zip_version = parse_version("0.24")
return "zip" if current_version >= min_zip_version else "pdf"
def get_file_size(version):
api_url = ROOT_URL + "%s/_downloads" % version
for path_details in json_urlread(api_url):
file_extension = get_file_extension(version)
file_path = f"scikit-learn-docs.{file_extension}"
if path_details["name"] == file_path:
return human_readable_data_quantity(path_details["size"], 1000)
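# Illustrative values for the helpers above (chosen arbitrarily, not fetched):
#   human_readable_data_quantity(512)                 -> "512 B"
#   human_readable_data_quantity(2048)                -> "2.0 KiB"
#   human_readable_data_quantity(1500, multiple=1000) -> "1.5 KB"
#   get_file_extension("0.23.2")                      -> "pdf" (pre-0.24 docs ship a PDF)
#   get_file_extension("1.5.dev0")                    -> "zip"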
parser = argparse.ArgumentParser()
parser.add_argument("--rst", type=str, required=True)
parser.add_argument("--json", type=str, required=True)
args = parser.parse_args()
heading = "Available documentation for scikit-learn"
json_content = []
rst_content = [
":orphan:\n",
heading,
"=" * len(heading) + "\n",
"Web-based documentation is available for versions listed below:\n",
]
ROOT_URL = "https://api.github.com/repos/scikit-learn/scikit-learn.github.io/contents/"
RAW_FMT = "https://raw.githubusercontent.com/scikit-learn/scikit-learn.github.io/master/%s/index.html"
VERSION_RE = re.compile(r"scikit-learn ([\w\.\-]+) documentation</title>")
NAMED_DIRS = ["dev", "stable"]
# Gather data for each version directory, including symlinks
dirs = {}
symlinks = {}
root_listing = json_urlread(ROOT_URL)
for path_details in root_listing:
name = path_details["name"]
if not (name[:1].isdigit() or name in NAMED_DIRS):
continue
if path_details["type"] == "dir":
html = urlopen(RAW_FMT % name).read().decode("utf8")
version_num = VERSION_RE.search(html).group(1)
file_size = get_file_size(name)
dirs[name] = (version_num, file_size)
if path_details["type"] == "symlink":
symlinks[name] = json_urlread(path_details["_links"]["self"])["target"]
# Symlinks should have same data as target
for src, dst in symlinks.items():
if dst in dirs:
dirs[src] = dirs[dst]
# Output in order: dev, stable, decreasing other version
seen = set()
for i, name in enumerate(
NAMED_DIRS
+ sorted((k for k in dirs if k[:1].isdigit()), key=parse_version, reverse=True)
):
version_num, file_size = dirs[name]
if version_num in seen:
# symlink came first
continue
else:
seen.add(version_num)
full_name = f"{version_num}" if name[:1].isdigit() else f"{version_num} ({name})"
path = f"https://scikit-learn.org/{name}/"
# Update JSON for the version switcher; only keep the 8 latest versions to avoid
# overloading the version switcher dropdown
if i < 8:
info = {"name": full_name, "version": version_num, "url": path}
if name == "stable":
info["preferred"] = True
json_content.append(info)
# Printout for the historical version page
out = f"* `scikit-learn {full_name} documentation <{path}>`_"
if file_size is not None:
file_extension = get_file_extension(version_num)
out += (
f" (`{file_extension.upper()} {file_size} <{path}/"
f"_downloads/scikit-learn-docs.{file_extension}>`_)"
)
rst_content.append(out)
with open(args.rst, "w", encoding="utf-8") as f:
f.write("\n".join(rst_content) + "\n")
print(f"Written {args.rst}")
with open(args.json, "w", encoding="utf-8") as f:
json.dump(json_content, f, indent=2)
print(f"Written {args.json}")
|
#!/usr/bin/env python3
# Write the available versions page (--rst) and the version switcher JSON (--json).
# Version switcher see:
# https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/version-dropdown.html
# https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/announcements.html#announcement-banners
import argparse
import json
import re
import sys
from urllib.request import urlopen
from sklearn.utils.fixes import parse_version
def json_urlread(url):
try:
return json.loads(urlopen(url).read().decode("utf8"))
except Exception:
print("Error reading", url, file=sys.stderr)
raise
def human_readable_data_quantity(quantity, multiple=1024):
# https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
if quantity == 0:
quantity = +0
SUFFIXES = ["B"] + [i + {1000: "B", 1024: "iB"}[multiple] for i in "KMGTPEZY"]
for suffix in SUFFIXES:
if quantity < multiple or suffix == SUFFIXES[-1]:
if suffix == SUFFIXES[0]:
return "%d %s" % (quantity, suffix)
else:
return "%.1f %s" % (quantity, suffix)
else:
quantity /= multiple
def get_file_extension(version):
if "dev" in version:
# The 'dev' branch should be explicitly handled
return "zip"
current_version = parse_version(version)
min_zip_version = parse_version("0.24")
return "zip" if current_version >= min_zip_version else "pdf"
def get_file_size(version):
api_url = ROOT_URL + "%s/_downloads" % version
for path_details in json_urlread(api_url):
file_extension = get_file_extension(version)
file_path = f"scikit-learn-docs.{file_extension}"
if path_details["name"] == file_path:
return human_readable_data_quantity(path_details["size"], 1000)
parser = argparse.ArgumentParser()
parser.add_argument("--rst", type=str, required=True)
parser.add_argument("--json", type=str, required=True)
args = parser.parse_args()
heading = "Available documentation for scikit-learn"
json_content = []
rst_content = [
":orphan:\n",
heading,
"=" * len(heading) + "\n",
"Web-based documentation is available for versions listed below:\n",
]
ROOT_URL = (
"https://api.github.com/repos/scikit-learn/scikit-learn.github.io/contents/" # noqa
)
RAW_FMT = "https://raw.githubusercontent.com/scikit-learn/scikit-learn.github.io/master/%s/index.html" # noqa
VERSION_RE = re.compile(r"scikit-learn ([\w\.\-]+) documentation</title>")
NAMED_DIRS = ["dev", "stable"]
# Gather data for each version directory, including symlinks
dirs = {}
symlinks = {}
root_listing = json_urlread(ROOT_URL)
for path_details in root_listing:
name = path_details["name"]
if not (name[:1].isdigit() or name in NAMED_DIRS):
continue
if path_details["type"] == "dir":
html = urlopen(RAW_FMT % name).read().decode("utf8")
version_num = VERSION_RE.search(html).group(1)
file_size = get_file_size(name)
dirs[name] = (version_num, file_size)
if path_details["type"] == "symlink":
symlinks[name] = json_urlread(path_details["_links"]["self"])["target"]
# Symlinks should have same data as target
for src, dst in symlinks.items():
if dst in dirs:
dirs[src] = dirs[dst]
# Output in order: dev, stable, decreasing other version
seen = set()
for i, name in enumerate(
NAMED_DIRS
+ sorted((k for k in dirs if k[:1].isdigit()), key=parse_version, reverse=True)
):
version_num, file_size = dirs[name]
if version_num in seen:
# symlink came first
continue
else:
seen.add(version_num)
full_name = f"{version_num}" if name[:1].isdigit() else f"{version_num} ({name})"
path = f"https://scikit-learn.org/{name}/"
# Update JSON for the version switcher; only keep the 8 latest versions to avoid
# overloading the version switcher dropdown
if i < 8:
info = {"name": full_name, "version": version_num, "url": path}
if name == "stable":
info["preferred"] = True
json_content.append(info)
# Printout for the historical version page
out = f"* `scikit-learn {full_name} documentation <{path}>`_"
if file_size is not None:
file_extension = get_file_extension(version_num)
out += (
f" (`{file_extension.upper()} {file_size} <{path}/"
f"_downloads/scikit-learn-docs.{file_extension}>`_)"
)
rst_content.append(out)
with open(args.rst, "w", encoding="utf-8") as f:
f.write("\n".join(rst_content) + "\n")
print(f"Written {args.rst}")
with open(args.json, "w", encoding="utf-8") as f:
json.dump(json_content, f, indent=2)
print(f"Written {args.json}")
|
"""
===================================
Visualizations with Display Objects
===================================
.. currentmodule:: sklearn.metrics
In this example, we will construct display objects,
:class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and
:class:`PrecisionRecallDisplay` directly from their respective metrics. This
is an alternative to using their corresponding plot functions when
a model's predictions are already computed or expensive to compute. Note that
this is advanced usage, and in general we recommend using their respective
plot functions.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Load Data and train model
# -------------------------
# For this example, we load a blood transfusion service center data set from
# `OpenML <https://www.openml.org/d/1464>`_. This is a binary classification
# problem where the target is whether an individual donated blood. Then the
# data is split into a train and test dataset and a logistic regression is
# fitted with the train dataset.
from sklearn.datasets import fetch_openml
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
X, y = fetch_openml(data_id=1464, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
clf = make_pipeline(StandardScaler(), LogisticRegression(random_state=0))
clf.fit(X_train, y_train)
# %%
# Create :class:`ConfusionMatrixDisplay`
# ######################################
# With the fitted model, we compute the predictions of the model on the test
# dataset. These predictions are used to compute the confusion matrix which
# is plotted with the :class:`ConfusionMatrixDisplay`
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cm_display = ConfusionMatrixDisplay(cm).plot()
# %%
# Create :class:`RocCurveDisplay`
# ###############################
# The roc curve requires either the probabilities or the non-thresholded
# decision values from the estimator. Since the logistic regression provides
# a decision function, we will use it to plot the roc curve:
from sklearn.metrics import RocCurveDisplay, roc_curve
y_score = clf.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, y_score, pos_label=clf.classes_[1])
roc_display = RocCurveDisplay(fpr=fpr, tpr=tpr).plot()
# %%
# Create :class:`PrecisionRecallDisplay`
# ######################################
# Similarly, the precision recall curve can be plotted using `y_score` from
# the previous sections.
from sklearn.metrics import PrecisionRecallDisplay, precision_recall_curve
prec, recall, _ = precision_recall_curve(y_test, y_score, pos_label=clf.classes_[1])
pr_display = PrecisionRecallDisplay(precision=prec, recall=recall).plot()
# %%
# Combining the display objects into a single plot
# ################################################
# The display objects store the computed values that were passed as arguments.
# This allows for the visualizations to be easily combined using matplotlib's
# API. In the following example, we place the displays next to each other in a
# row.
# sphinx_gallery_thumbnail_number = 4
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
roc_display.plot(ax=ax1)
pr_display.plot(ax=ax2)
plt.show()
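# %%
# As an aside, when the fitted estimator itself is at hand, each plot above
# can also be produced in a single call through the ``from_estimator`` class
# methods (a sketch, assuming a scikit-learn version that provides them):
#
#   RocCurveDisplay.from_estimator(clf, X_test, y_test)
#   PrecisionRecallDisplay.from_estimator(clf, X_test, y_test)
#   ConfusionMatrixDisplay.from_estimator(clf, X_test, y_test)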
|
"""
===================================
Visualizations with Display Objects
===================================
.. currentmodule:: sklearn.metrics
In this example, we will construct display objects,
:class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and
:class:`PrecisionRecallDisplay` directly from their respective metrics. This
is an alternative to using their corresponding plot functions when
a model's predictions are already computed or expensive to compute. Note that
this is advanced usage, and in general we recommend using their respective
plot functions.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Load Data and train model
# -------------------------
# For this example, we load a blood transfusion service center data set from
# `OpenML <https://www.openml.org/d/1464>`_. This is a binary classification
# problem where the target is whether an individual donated blood. Then the
# data is split into a train and test dataset and a logistic regression is
# fitted with the train dataset.
from sklearn.datasets import fetch_openml
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
X, y = fetch_openml(data_id=1464, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
clf = make_pipeline(StandardScaler(), LogisticRegression(random_state=0))
clf.fit(X_train, y_train)
# %%
# Create :class:`ConfusionMatrixDisplay`
##############################################################################
# With the fitted model, we compute the predictions of the model on the test
# dataset. These predictions are used to compute the confusion matrix which
# is plotted with the :class:`ConfusionMatrixDisplay`
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cm_display = ConfusionMatrixDisplay(cm).plot()
# %%
# Create :class:`RocCurveDisplay`
##############################################################################
# The roc curve requires either the probabilities or the non-thresholded
# decision values from the estimator. Since the logistic regression provides
# a decision function, we will use it to plot the roc curve:
from sklearn.metrics import RocCurveDisplay, roc_curve
y_score = clf.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, y_score, pos_label=clf.classes_[1])
roc_display = RocCurveDisplay(fpr=fpr, tpr=tpr).plot()
# %%
# Create :class:`PrecisionRecallDisplay`
##############################################################################
# Similarly, the precision recall curve can be plotted using `y_score` from
# the previous sections.
from sklearn.metrics import PrecisionRecallDisplay, precision_recall_curve
prec, recall, _ = precision_recall_curve(y_test, y_score, pos_label=clf.classes_[1])
pr_display = PrecisionRecallDisplay(precision=prec, recall=recall).plot()
# %%
# Combining the display objects into a single plot
##############################################################################
# The display objects store the computed values that were passed as arguments.
# This allows for the visualizations to be easily combined using matplotlib's
# API. In the following example, we place the displays next to each other in a
# row.
# sphinx_gallery_thumbnail_number = 4
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
roc_display.plot(ax=ax1)
pr_display.plot(ax=ax2)
plt.show()
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
    Args:
        pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
    """
SPLADE_POOLING_MODES = ("sum", "max")
def __init__(self, pooling_strategy: str = "max") -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.config_keys = ["pooling_strategy"]
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""FForward pass of the model.
Args:
features: Dictionary containing input features with 'mlm_logits' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["mlm_logits"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
return {"sentence_embedding": pooled_scores}
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
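# A minimal sanity-check sketch of the pooling above; the shapes are arbitrary
# (batch_size=2, seq_length=4, vocab_size=10), not a real MLM head.
if __name__ == "__main__":
    pooling = SpladePooling(pooling_strategy="max")
    dummy_logits = torch.randn(2, 4, 10)
    pooled = pooling({"mlm_logits": dummy_logits})
    print(pooled["sentence_embedding"].shape)  # torch.Size([2, 10])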
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
    Args:
        pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
    """
SPLADE_POOLING_MODES = ("sum", "max")
def __init__(self, pooling_strategy: str = "max") -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.config_keys = ["pooling_strategy"]
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""FForward pass of the model.
Args:
features: Dictionary containing input features with 'mlm_logits' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["mlm_logits"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
return {"sentence_embedding": pooled_scores}
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
__all__ = [
'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
__all__ = ['DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder']
|
r"""
AgentSearch reader.
Example as of 1/8/2024:
```python
AgentSearch = download_loader("AgentSearch")
reader = AgentSearch()
document = reader.load_data(
query="latest news",
search_provider="bing"
)[0]
print(f'Document:\n{document} ')
```
```plaintext
Document:
Doc ID: 67a57dfe-8bd6-4c69-af9d-683e76177119
Text: The latest news encompasses a wide array of topics, reflecting
the dynamic and complex nature of the world today. Notable events
include the conviction of a man for killing his ex-wife's new partner,
highlighting the ongoing issue of domestic violence and its legal
consequences [2]. In the realm of international relations, the release
of Jeffrey...
```
For more information, refer to the docs:
https://agent-search.readthedocs.io/en/latest/
"""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class AgentSearchReader(BaseReader):
"""AgentSearch reader."""
def __init__(
self,
api_base: Optional[str] = None,
api_key: Optional[str] = None,
):
"""Initialize with parameters."""
import_err_msg = (
"`agent-search` package not found, please run `pip install agent-search`"
)
try:
import agent_search # noqa: F401
except ImportError:
raise ImportError(import_err_msg)
from agent_search import SciPhi
self._client = SciPhi(api_base=api_base, api_key=api_key)
def load_data(
self,
query: str,
search_provider: str = "bing",
llm_model: str = "SciPhi/Sensei-7B-V1",
) -> List[Document]:
"""
Load data from AgentSearch, hosted by SciPhi.
        Args:
            query (str): The search query.
            search_provider (str): The search provider to use, e.g. "bing".
            llm_model (str): The LLM used to generate the response.
Returns:
List[Document]: A list of documents.
"""
rag_response = self._client.get_search_rag_response(
query=query, search_provider=search_provider, llm_model=llm_model
)
return [Document(text=rag_response.pop("response"), metadata=rag_response)]
|
r"""AgentSearch reader.
Example as of 1/8/2024:
```python
AgentSearch = download_loader("AgentSearch")
reader = AgentSearch()
document = reader.load_data(
query="latest news",
search_provider="bing"
)[0]
print(f'Document:\n{document} ')
```
```plaintext
Document:
Doc ID: 67a57dfe-8bd6-4c69-af9d-683e76177119
Text: The latest news encompasses a wide array of topics, reflecting
the dynamic and complex nature of the world today. Notable events
include the conviction of a man for killing his ex-wife's new partner,
highlighting the ongoing issue of domestic violence and its legal
consequences [2]. In the realm of international relations, the release
of Jeffrey...
```
For more information, refer to the docs:
https://agent-search.readthedocs.io/en/latest/
"""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class AgentSearchReader(BaseReader):
"""AgentSearch reader."""
def __init__(
self,
api_base: Optional[str] = None,
api_key: Optional[str] = None,
):
"""Initialize with parameters."""
import_err_msg = (
"`agent-search` package not found, please run `pip install agent-search`"
)
try:
import agent_search # noqa: F401
except ImportError:
raise ImportError(import_err_msg)
from agent_search import SciPhi
self._client = SciPhi(api_base=api_base, api_key=api_key)
def load_data(
self,
query: str,
search_provider: str = "bing",
llm_model: str = "SciPhi/Sensei-7B-V1",
) -> List[Document]:
"""
Load data from AgentSearch, hosted by SciPhi.
        Args:
            query (str): The search query.
            search_provider (str): The search provider to use, e.g. "bing".
            llm_model (str): The LLM used to generate the response.
Returns:
List[Document]: A list of documents.
"""
rag_response = self._client.get_search_rag_response(
query=query, search_provider=search_provider, llm_model=llm_model
)
return [Document(text=rag_response.pop("response"), metadata=rag_response)]
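# A minimal usage sketch (the API key is a placeholder; load_data performs a
# live request against the SciPhi-hosted AgentSearch API):
#   reader = AgentSearchReader(api_key="YOUR_SCIPHI_API_KEY")
#   documents = reader.load_data(query="latest news", search_provider="bing")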
|
"""
=====================================
How to write your own Datapoint class
=====================================
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own datapoint class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_plot_datapoints.py`.
"""
# %%
import torch
import torchvision
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.datapoints.Datapoint` class. It will be enough to cover
# what you need to know to implement your more elaborate use cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.datapoints.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/datapoints/_bounding_box.py>`_.
class MyDatapoint(datapoints.Datapoint):
pass
my_dp = MyDatapoint([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom Datapoint class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyDatapoint class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", datapoint_cls=MyDatapoint)
def hflip_my_datapoint(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return MyDatapoint.wrap_like(my_dp, out)
# %%
# To understand why ``wrap_like`` is used, see
# :ref:`datapoint_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyDatapoint`` instance:
my_dp = MyDatapoint(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_datapoint(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return MyDatapoint.wrap_like(my_dp, out)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding only `**kwargs` should be enough).
|
"""
=====================================
How to write your own Datapoint class
=====================================
This guide is intended for downstream library maintainers. We explain how to
write your own datapoint class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_plot_datapoints.py`.
"""
# %%
import torch
import torchvision
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.datapoints.Datapoint` class. It will be enough to cover
# what you need to know to implement your more elaborate use cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.datapoints.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/datapoints/_bounding_box.py>`_.
class MyDatapoint(datapoints.Datapoint):
pass
my_dp = MyDatapoint([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom Datapoint class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyDatapoint class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", datapoint_cls=MyDatapoint)
def hflip_my_datapoint(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return MyDatapoint.wrap_like(my_dp, out)
# %%
# To understand why ``wrap_like`` is used, see
# :ref:`datapoint_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# The functionals that you can hook into are the ones in
# ``torchvision.transforms.v2.functional`` and they are documented in
# :ref:`functional_transforms`.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyDatapoint`` instance:
my_dp = MyDatapoint(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_datapoint(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return MyDatapoint.wrap_like(my_dp, out)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding only `**kwargs` should be enough).
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
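# A hedged wiring sketch: custom gateways like this are typically plugged into
# a Flow via ``config_gateway`` (treat the exact call as illustrative for your
# Jina version):
#   from jina import Flow
#   with Flow().config_gateway(uses=DummyGateway, protocol='http') as f:
#       f.block()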
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.host = self.runtime_args.host
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
"""Callback Handler that prints to std out."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from typing_extensions import override
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.utils import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class StdOutCallbackHandler(BaseCallbackHandler):
"""Callback Handler that prints to std out."""
def __init__(self, color: Optional[str] = None) -> None:
"""Initialize callback handler.
Args:
color: The color to use for the text. Defaults to None.
"""
self.color = color
@override
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print(f"\n\n\033[1m> Entering new {name} chain...\033[0m") # noqa: T201
@override
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print("\n\033[1m> Finished chain.\033[0m") # noqa: T201
@override
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str]): The color to use for the text. Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color)
@override
def on_tool_end(
self,
output: Any,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (Any): The output to print.
color (Optional[str]): The color to use for the text. Defaults to None.
observation_prefix (Optional[str]): The observation prefix.
Defaults to None.
llm_prefix (Optional[str]): The LLM prefix. Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
output = str(output)
if observation_prefix is not None:
print_text(f"\n{observation_prefix}")
print_text(output, color=color or self.color)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}")
@override
def on_text(
self,
text: str,
color: Optional[str] = None,
end: str = "",
**kwargs: Any,
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str]): The color to use for the text. Defaults to None.
end (str): The end character to use. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end)
@override
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str]): The color to use for the text. Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n")
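# A minimal sketch exercising the handler directly; in real use it is passed
# to a chain via ``callbacks=[...]``, and the values below are illustrative.
if __name__ == "__main__":
    handler = StdOutCallbackHandler(color="green")
    handler.on_chain_start({"name": "demo_chain"}, {"question": "hi"})
    handler.on_text("intermediate reasoning", end="\n")
    handler.on_chain_end({"answer": "hello"})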
|
"""Callback Handler that prints to std out."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.utils import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class StdOutCallbackHandler(BaseCallbackHandler):
"""Callback Handler that prints to std out."""
def __init__(self, color: Optional[str] = None) -> None:
"""Initialize callback handler.
Args:
color: The color to use for the text. Defaults to None.
"""
self.color = color
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print(f"\n\n\033[1m> Entering new {name} chain...\033[0m") # noqa: T201
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print("\n\033[1m> Finished chain.\033[0m") # noqa: T201
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str]): The color to use for the text. Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color)
def on_tool_end(
self,
output: Any,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (Any): The output to print.
color (Optional[str]): The color to use for the text. Defaults to None.
observation_prefix (Optional[str]): The observation prefix.
Defaults to None.
llm_prefix (Optional[str]): The LLM prefix. Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
output = str(output)
if observation_prefix is not None:
print_text(f"\n{observation_prefix}")
print_text(output, color=color or self.color)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}")
def on_text(
self,
text: str,
color: Optional[str] = None,
end: str = "",
**kwargs: Any,
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str]): The color to use for the text. Defaults to None.
end (str): The end character to use. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str]): The color to use for the text. Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n")
|
"""General node utils."""
import logging
import uuid
from typing import List, Optional, Protocol, runtime_checkable
from llama_index.core.schema import (
BaseNode,
Document,
ImageDocument,
ImageNode,
NodeRelationship,
TextNode,
)
from llama_index.core.utils import truncate_text
logger = logging.getLogger(__name__)
@runtime_checkable
class IdFuncCallable(Protocol):
def __call__(self, i: int, doc: BaseNode) -> str: ...
def default_id_func(i: int, doc: BaseNode) -> str:
return str(uuid.uuid4())
def build_nodes_from_splits(
text_splits: List[str],
document: BaseNode,
ref_doc: Optional[BaseNode] = None,
id_func: Optional[IdFuncCallable] = None,
) -> List[TextNode]:
"""Build nodes from splits."""
ref_doc = ref_doc or document
id_func = id_func or default_id_func
nodes: List[TextNode] = []
"""Calling as_related_node_info() on a document recomputes the hash for the whole text and metadata"""
"""It is not that bad, when creating relationships between the nodes, but is terrible when adding a relationship"""
"""between the node and a document, hence we create the relationship only once here and pass it to the nodes"""
relationships = {NodeRelationship.SOURCE: ref_doc.as_related_node_info()}
for i, text_chunk in enumerate(text_splits):
logger.debug(f"> Adding chunk: {truncate_text(text_chunk, 50)}")
if isinstance(document, ImageDocument):
image_node = ImageNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
image=document.image,
image_path=document.image_path,
image_url=document.image_url,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(image_node) # type: ignore
elif isinstance(document, Document):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
elif isinstance(document, TextNode):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_seperator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
else:
raise ValueError(f"Unknown document type: {type(document)}")
return nodes
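# A minimal usage sketch (document text and splits are illustrative):
#   doc = Document(text="Hello world. Second sentence.")
#   nodes = build_nodes_from_splits(["Hello world.", "Second sentence."], doc)
#   assert all(NodeRelationship.SOURCE in n.relationships for n in nodes)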
|
"""General node utils."""
import logging
import uuid
from typing import List, Optional, Protocol, runtime_checkable
from llama_index.core.schema import (
BaseNode,
Document,
ImageDocument,
ImageNode,
NodeRelationship,
TextNode,
)
from llama_index.core.utils import truncate_text
logger = logging.getLogger(__name__)
@runtime_checkable
class IdFuncCallable(Protocol):
def __call__(self, i: int, doc: BaseNode) -> str:
...
def default_id_func(i: int, doc: BaseNode) -> str:
return str(uuid.uuid4())
def build_nodes_from_splits(
text_splits: List[str],
document: BaseNode,
ref_doc: Optional[BaseNode] = None,
id_func: Optional[IdFuncCallable] = None,
) -> List[TextNode]:
"""Build nodes from splits."""
ref_doc = ref_doc or document
id_func = id_func or default_id_func
nodes: List[TextNode] = []
"""Calling as_related_node_info() on a document recomputes the hash for the whole text and metadata"""
"""It is not that bad, when creating relationships between the nodes, but is terrible when adding a relationship"""
"""between the node and a document, hence we create the relationship only once here and pass it to the nodes"""
relationships = {NodeRelationship.SOURCE: ref_doc.as_related_node_info()}
for i, text_chunk in enumerate(text_splits):
logger.debug(f"> Adding chunk: {truncate_text(text_chunk, 50)}")
if isinstance(document, ImageDocument):
image_node = ImageNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
image=document.image,
image_path=document.image_path,
image_url=document.image_url,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(image_node) # type: ignore
elif isinstance(document, Document):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
elif isinstance(document, TextNode):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_seperator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
else:
raise ValueError(f"Unknown document type: {type(document)}")
return nodes
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import DiTTransformer2DModel, Transformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
slow,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class DiTTransformer2DModelTests(ModelTesterMixin, unittest.TestCase):
model_class = DiTTransformer2DModel
main_input_name = "hidden_states"
@property
def dummy_input(self):
batch_size = 4
in_channels = 4
sample_size = 8
scheduler_num_train_steps = 1000
num_class_labels = 4
hidden_states = floats_tensor((batch_size, in_channels, sample_size, sample_size)).to(torch_device)
timesteps = torch.randint(0, scheduler_num_train_steps, size=(batch_size,)).to(torch_device)
class_label_ids = torch.randint(0, num_class_labels, size=(batch_size,)).to(torch_device)
return {"hidden_states": hidden_states, "timestep": timesteps, "class_labels": class_label_ids}
@property
def input_shape(self):
return (4, 8, 8)
@property
def output_shape(self):
return (8, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 4,
"out_channels": 8,
"activation_fn": "gelu-approximate",
"num_attention_heads": 2,
"attention_head_dim": 4,
"attention_bias": True,
"num_layers": 1,
"norm_type": "ada_norm_zero",
"num_embeds_ada_norm": 8,
"patch_size": 2,
"sample_size": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
super().test_output(
expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
)
def test_correct_class_remapping_from_dict_config(self):
init_dict, _ = self.prepare_init_args_and_inputs_for_common()
model = Transformer2DModel.from_config(init_dict)
assert isinstance(model, DiTTransformer2DModel)
def test_gradient_checkpointing_is_applied(self):
expected_set = {"DiTTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
def test_effective_gradient_checkpointing(self):
super().test_effective_gradient_checkpointing(loss_tolerance=1e-4)
def test_correct_class_remapping_from_pretrained_config(self):
config = DiTTransformer2DModel.load_config("facebook/DiT-XL-2-256", subfolder="transformer")
model = Transformer2DModel.from_config(config)
assert isinstance(model, DiTTransformer2DModel)
@slow
def test_correct_class_remapping(self):
model = Transformer2DModel.from_pretrained("facebook/DiT-XL-2-256", subfolder="transformer")
assert isinstance(model, DiTTransformer2DModel)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import DiTTransformer2DModel, Transformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
slow,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class DiTTransformer2DModelTests(ModelTesterMixin, unittest.TestCase):
model_class = DiTTransformer2DModel
main_input_name = "hidden_states"
@property
def dummy_input(self):
batch_size = 4
in_channels = 4
sample_size = 8
scheduler_num_train_steps = 1000
num_class_labels = 4
hidden_states = floats_tensor((batch_size, in_channels, sample_size, sample_size)).to(torch_device)
timesteps = torch.randint(0, scheduler_num_train_steps, size=(batch_size,)).to(torch_device)
class_label_ids = torch.randint(0, num_class_labels, size=(batch_size,)).to(torch_device)
return {"hidden_states": hidden_states, "timestep": timesteps, "class_labels": class_label_ids}
@property
def input_shape(self):
return (4, 8, 8)
@property
def output_shape(self):
return (8, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 4,
"out_channels": 8,
"activation_fn": "gelu-approximate",
"num_attention_heads": 2,
"attention_head_dim": 4,
"attention_bias": True,
"num_layers": 1,
"norm_type": "ada_norm_zero",
"num_embeds_ada_norm": 8,
"patch_size": 2,
"sample_size": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
super().test_output(
expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
)
def test_correct_class_remapping_from_dict_config(self):
init_dict, _ = self.prepare_init_args_and_inputs_for_common()
model = Transformer2DModel.from_config(init_dict)
assert isinstance(model, DiTTransformer2DModel)
def test_correct_class_remapping_from_pretrained_config(self):
config = DiTTransformer2DModel.load_config("facebook/DiT-XL-2-256", subfolder="transformer")
model = Transformer2DModel.from_config(config)
assert isinstance(model, DiTTransformer2DModel)
@slow
def test_correct_class_remapping(self):
model = Transformer2DModel.from_pretrained("facebook/DiT-XL-2-256", subfolder="transformer")
assert isinstance(model, DiTTransformer2DModel)
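# A minimal usage sketch (hypothetical shapes, mirroring the tiny test config above):
# instantiating the small DiT and running one forward pass. The output has
# out_channels=8 at the same spatial size as the input latents.
if __name__ == "__main__":
    model = DiTTransformer2DModel(
        in_channels=4,
        out_channels=8,
        activation_fn="gelu-approximate",
        num_attention_heads=2,
        attention_head_dim=4,
        attention_bias=True,
        num_layers=1,
        norm_type="ada_norm_zero",
        num_embeds_ada_norm=8,
        patch_size=2,
        sample_size=8,
    )
    sample = torch.randn(1, 4, 8, 8)
    timestep = torch.tensor([1])
    class_labels = torch.tensor([0])
    out = model(sample, timestep=timestep, class_labels=class_labels).sample
    assert out.shape == (1, 8, 8, 8)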
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# The model is trained by 270k iterations with batch_size 64,
# which is roughly equivalent to 144 epochs.
max_iters = 270000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[243000, 256500, 263250],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# The model is trained by 270k iterations with batch_size 64,
# which is roughly equivalent to 144 epochs.
max_iters = 270000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[243000, 256500, 263250],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from typing import List, Optional, Literal
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SpiderWebReader(BasePydanticReader):
"""
Scrapes a URL for data and returns llm-ready data with `Spider.cloud`.
Must have the Python package `spider-client` installed and a Spider API key.
See https://spider.cloud for more.
Args:
api_key (str): The Spider API key, get one at https://spider.cloud
mode (Mode): "Scrape" the url (default) or "crawl" the url following all subpages.
params (dict): Additional parameters to pass to the Spider API.
"""
class Config:
use_enum_values = True
extra = "allow"
def __init__(
self,
*,
api_key: Optional[str] = None,
mode: Literal["scrape", "crawl"] = "scrape",
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, mode=mode, params=params)
if params is None:
params = {"return_format": "markdown", "metadata": True}
try:
from spider import Spider
except ImportError:
raise ImportError(
"`spider-client` package not found, please run `pip install spider-client`"
)
self.spider = Spider(api_key=api_key)
self.mode = mode
self.params = params
def load_data(self, url: str) -> List[Document]:
        if self.mode not in ("scrape", "crawl"):
            raise ValueError(
                "Unknown mode in `mode` parameter; only `scrape` and `crawl` are allowed"
            )
action = (
self.spider.scrape_url if self.mode == "scrape" else self.spider.crawl_url
)
spider_docs = action(url=url, params=self.params)
if not spider_docs:
            return [Document(text="", metadata={})]
documents = []
if isinstance(spider_docs, list):
for doc in spider_docs:
text = doc.get("content", "")
if text is not None:
documents.append(
Document(
text=text,
metadata=doc.get("metadata", {}),
)
)
return documents
|
from typing import List, Optional, Literal
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SpiderWebReader(BasePydanticReader):
"""
Scrapes a URL for data and returns llm-ready data with `Spider.cloud`.
Must have the Python package `spider-client` installed and a Spider API key.
See https://spider.cloud for more.
Args:
api_key (str): The Spider API key, get one at https://spider.cloud
mode (Mode): "Scrape" the url (default) or "crawl" the url following all subpages.
params (dict): Additional parameters to pass to the Spider API.
"""
class Config:
use_enum_values = True
extra = "allow"
def __init__(
self,
*,
api_key: Optional[str] = None,
mode: Literal["scrape", "crawl"] = "scrape",
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, mode=mode, params=params)
if params is None:
params = {"return_format": "markdown", "metadata": True}
try:
from spider import Spider
except ImportError:
raise ImportError(
"`spider-client` package not found, please run `pip install spider-client`"
)
self.spider = Spider(api_key=api_key)
self.mode = mode
self.params = params
def load_data(self, url: str) -> List[Document]:
        if self.mode not in ("scrape", "crawl"):
            raise ValueError(
                "Unknown mode in `mode` parameter; only `scrape` and `crawl` are allowed"
            )
action = (
self.spider.scrape_url if self.mode == "scrape" else self.spider.crawl_url
)
spider_docs = action(url=url, params=self.params)
if not spider_docs:
            return [Document(text="", metadata={})]
documents = []
if isinstance(spider_docs, list):
for doc in spider_docs:
text = doc.get("content", "")
if text is not None:
documents.append(
Document(
text=text,
metadata=doc.get("metadata", {}),
)
)
return documents
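# A minimal usage sketch (hypothetical API key and URL; requires `spider-client`
# and network access):
if __name__ == "__main__":
    reader = SpiderWebReader(api_key="YOUR_SPIDER_API_KEY", mode="scrape")
    docs = reader.load_data(url="https://spider.cloud")
    for d in docs:
        print(d.metadata, d.text[:80])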
|
import os
import time
import pytest
from jina import Client, Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
img_name = 'jina/replica-exec'
@pytest.fixture(scope='function')
def docker_image_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'replica-exec'), tag=img_name)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize('shards', [1, 2])
@pytest.mark.parametrize('replicas', [1, 3, 4])
def test_containerruntime_args(docker_image_built, shards, replicas, port_generator):
exposed_port = port_generator()
f = Flow(port=exposed_port).add(
name='executor_container',
uses=f'docker://{img_name}',
replicas=replicas,
shards=shards,
polling='ANY',
)
with f:
ret1 = Client(port=exposed_port).index(
inputs=DocumentArray([Document() for _ in range(2000)]),
request_size=10,
return_responses=True,
)
assert len(ret1) == 200
unique_replicas = set()
shard_ids = set()
for r in ret1:
assert len(r.docs) == 10
for replica in r.docs[:, 'tags__replica']:
unique_replicas.add(replica)
for shard_id in r.docs[:, 'tags__shard_id']:
shard_ids.add(shard_id)
for doc in r.docs:
assert doc.tags['shards'] == shards
assert shard_ids == set(range(shards))
assert len(unique_replicas) == replicas * shards
|
import os
import time
import pytest
from jina import Client, Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
img_name = 'jina/replica-exec'
exposed_port = 12345
@pytest.fixture(scope='function')
def docker_image_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'replica-exec'), tag=img_name)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize('shards', [1, 2])
@pytest.mark.parametrize('replicas', [1, 3, 4])
def test_containerruntime_args(docker_image_built, shards, replicas):
f = Flow(port=exposed_port).add(
name='executor_container',
uses=f'docker://{img_name}',
replicas=replicas,
shards=shards,
polling='ANY',
)
with f:
ret1 = Client(port=exposed_port).index(
inputs=DocumentArray([Document() for _ in range(2000)]),
request_size=10,
return_responses=True,
)
assert len(ret1) == 200
unique_replicas = set()
shard_ids = set()
for r in ret1:
assert len(r.docs) == 10
for replica in r.docs[:, 'tags__replica']:
unique_replicas.add(replica)
for shard_id in r.docs[:, 'tags__shard_id']:
shard_ids.add(shard_id)
for doc in r.docs:
assert doc.tags['shards'] == shards
assert shard_ids == set(range(shards))
assert len(unique_replicas) == replicas * shards
|
import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar, Optional
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
logger = logging.getLogger(__name__)
class BaseOAuthHandler(ABC):
# --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[ProviderName]
DEFAULT_SCOPES: ClassVar[list[str]] = []
# --8<-- [end:BaseOAuthHandler1]
@abstractmethod
# --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
# --8<-- [end:BaseOAuthHandler2]
@abstractmethod
# --8<-- [start:BaseOAuthHandler3]
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
# --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler4]
async def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler5]
async def _refresh_tokens(
self, credentials: OAuth2Credentials
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler6]
async def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# --8<-- [end:BaseOAuthHandler6]
"""Revokes the given token at provider,
returns False provider does not support it"""
...
async def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
f"{self.__class__.__name__} can not refresh tokens "
f"for other provider '{credentials.provider}'"
)
return await self._refresh_tokens(credentials)
async def get_access_token(self, credentials: OAuth2Credentials) -> str:
"""Returns a valid access token, refreshing it first if needed"""
if self.needs_refresh(credentials):
credentials = await self.refresh_tokens(credentials)
return credentials.access_token.get_secret_value()
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
"""Indicates whether the given tokens need to be refreshed"""
return (
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
def handle_default_scopes(self, scopes: list[str]) -> list[str]:
"""Handles the default scopes for the provider"""
# If scopes are empty, use the default scopes for the provider
if not scopes:
logger.debug(
f"Using default scopes for provider {self.PROVIDER_NAME.value}"
)
scopes = self.DEFAULT_SCOPES
return scopes
|
import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar, Optional
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
logger = logging.getLogger(__name__)
class BaseOAuthHandler(ABC):
# --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[ProviderName]
DEFAULT_SCOPES: ClassVar[list[str]] = []
# --8<-- [end:BaseOAuthHandler1]
@abstractmethod
# --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
# --8<-- [end:BaseOAuthHandler2]
@abstractmethod
# --8<-- [start:BaseOAuthHandler3]
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
# --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler4]
def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler5]
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler6]
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# --8<-- [end:BaseOAuthHandler6]
"""Revokes the given token at provider,
returns False provider does not support it"""
...
def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
f"{self.__class__.__name__} can not refresh tokens "
f"for other provider '{credentials.provider}'"
)
return self._refresh_tokens(credentials)
def get_access_token(self, credentials: OAuth2Credentials) -> str:
"""Returns a valid access token, refreshing it first if needed"""
if self.needs_refresh(credentials):
credentials = self.refresh_tokens(credentials)
return credentials.access_token.get_secret_value()
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
"""Indicates whether the given tokens need to be refreshed"""
return (
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
def handle_default_scopes(self, scopes: list[str]) -> list[str]:
"""Handles the default scopes for the provider"""
# If scopes are empty, use the default scopes for the provider
if not scopes:
logger.debug(
f"Using default scopes for provider {self.PROVIDER_NAME.value}"
)
scopes = self.DEFAULT_SCOPES
return scopes
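# A minimal sketch of a concrete handler. The provider endpoints are hypothetical
# and ProviderName.GITHUB is an assumed enum member; a real handler must call the
# provider's OAuth2 endpoints and build OAuth2Credentials from the responses.
class ExampleOAuthHandler(BaseOAuthHandler):
    PROVIDER_NAME = ProviderName.GITHUB  # assumed member; substitute your provider
    DEFAULT_SCOPES = ["read"]
    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
    def get_login_url(
        self, scopes: list[str], state: str, code_challenge: Optional[str]
    ) -> str:
        # Build the provider's authorization URL (example.com is a placeholder).
        scope_param = "%20".join(self.handle_default_scopes(scopes))
        return (
            "https://example.com/oauth/authorize"
            f"?client_id={self.client_id}&state={state}&scope={scope_param}"
        )
    def exchange_code_for_tokens(
        self, code: str, scopes: list[str], code_verifier: Optional[str]
    ) -> OAuth2Credentials:
        raise NotImplementedError("POST the code to the provider's token endpoint")
    def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
        raise NotImplementedError("POST the refresh token to the token endpoint")
    def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        return False  # this hypothetical provider does not support revocation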
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
"""Base class for mask heads used in One-Stage Instance Segmentation."""
def __init__(self, init_cfg):
super(BaseMaskHead, self).__init__(init_cfg)
@abstractmethod
def loss(self, **kwargs):
pass
@abstractmethod
def get_results(self, **kwargs):
"""Get precessed :obj:`InstanceData` of multiple images."""
pass
def forward_train(self,
x,
gt_labels,
gt_masks,
img_metas,
gt_bboxes=None,
gt_bboxes_ignore=None,
positive_infos=None,
**kwargs):
"""
Args:
x (list[Tensor] | tuple[Tensor]): Features from FPN.
Each has a shape (B, C, H, W).
            gt_labels (list[Tensor]): Ground truth labels of all images,
                each has a shape (num_gts,).
gt_masks (list[Tensor]) : Masks for each bbox, has a shape
(num_gts, h , w).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (list[Tensor], optional): Ground truth bboxes of
the image, each item has a shape (num_gts, 4).
gt_bboxes_ignore (list[Tensor], optional): Ground truth bboxes
to be ignored, each item has a shape (num_ignored_gts, 4).
positive_infos (list[:obj:`InstanceData`], optional): Information
of positive samples. Used when the label assignment is
done outside the MaskHead, e.g., in BboxHead in
YOLACT or CondInst, etc. When the label assignment is done in
MaskHead, it would be None, like SOLO. All values
in it should have shape (num_positive_samples, *).
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if positive_infos is None:
outs = self(x)
else:
outs = self(x, positive_infos)
assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \
'even if only one item is returned'
loss = self.loss(
*outs,
gt_labels=gt_labels,
gt_masks=gt_masks,
img_metas=img_metas,
gt_bboxes=gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
positive_infos=positive_infos,
**kwargs)
return loss
def simple_test(self,
feats,
img_metas,
rescale=False,
instances_list=None,
**kwargs):
"""Test function without test-time augmentation.
Args:
feats (tuple[torch.Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
instances_list (list[obj:`InstanceData`], optional): Detection
results of each image after the post process. Only exist
if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.
Returns:
list[obj:`InstanceData`]: Instance segmentation \
results of each image after the post process. \
Each item usually contains following keys. \
- scores (Tensor): Classification scores, has a shape
(num_instance,)
- labels (Tensor): Has a shape (num_instances,).
- masks (Tensor): Processed mask results, has a
shape (num_instances, h, w).
"""
if instances_list is None:
outs = self(feats)
else:
outs = self(feats, instances_list=instances_list)
mask_inputs = outs + (img_metas, )
results_list = self.get_results(
*mask_inputs,
rescale=rescale,
instances_list=instances_list,
**kwargs)
return results_list
def onnx_export(self, img, img_metas):
raise NotImplementedError(f'{self.__class__.__name__} does '
f'not support ONNX EXPORT')
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
"""Base class for mask heads used in One-Stage Instance Segmentation."""
def __init__(self, init_cfg):
super(BaseMaskHead, self).__init__(init_cfg)
@abstractmethod
def loss(self, **kwargs):
pass
@abstractmethod
def get_results(self, **kwargs):
"""Get precessed :obj:`InstanceData` of multiple images."""
pass
def forward_train(self,
x,
gt_labels,
gt_masks,
img_metas,
gt_bboxes=None,
gt_bboxes_ignore=None,
positive_infos=None,
**kwargs):
"""
Args:
x (list[Tensor] | tuple[Tensor]): Features from FPN.
Each has a shape (B, C, H, W).
            gt_labels (list[Tensor]): Ground truth labels of all images,
                each has a shape (num_gts,).
gt_masks (list[Tensor]) : Masks for each bbox, has a shape
(num_gts, h , w).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (list[Tensor]): Ground truth bboxes of the image,
each item has a shape (num_gts, 4).
gt_bboxes_ignore (list[Tensor], None): Ground truth bboxes to be
ignored, each item has a shape (num_ignored_gts, 4).
positive_infos (list[:obj:`InstanceData`], optional): Information
of positive samples. Used when the label assignment is
done outside the MaskHead, e.g., in BboxHead in
YOLACT or CondInst, etc. When the label assignment is done in
MaskHead, it would be None, like SOLO. All values
in it should have shape (num_positive_samples, *).
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if positive_infos is None:
outs = self(x)
else:
outs = self(x, positive_infos)
assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \
'even if only one item is returned'
loss = self.loss(
*outs,
gt_labels=gt_labels,
gt_masks=gt_masks,
img_metas=img_metas,
gt_bboxes=gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
positive_infos=positive_infos,
**kwargs)
return loss
def simple_test(self,
feats,
img_metas,
rescale=False,
instances_list=None,
**kwargs):
"""Test function without test-time augmentation.
Args:
feats (tuple[torch.Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
instances_list (list[obj:`InstanceData`], optional): Detection
results of each image after the post process. Only exist
if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.
Returns:
list[obj:`InstanceData`]: Instance segmentation \
results of each image after the post process. \
Each item usually contains following keys. \
- scores (Tensor): Classification scores, has a shape
(num_instance,)
- labels (Tensor): Has a shape (num_instances,).
- masks (Tensor): Processed mask results, has a
shape (num_instances, h, w).
"""
if instances_list is None:
outs = self(feats)
else:
outs = self(feats, instances_list=instances_list)
mask_inputs = outs + (img_metas, )
results_list = self.get_results(
*mask_inputs,
rescale=rescale,
instances_list=instances_list,
**kwargs)
return results_list
def onnx_export(self, img, img_metas):
raise NotImplementedError(f'{self.__class__.__name__} does '
f'not support ONNX EXPORT')
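# A minimal skeleton subclass (hypothetical, SOLO-style: label assignment happens
# inside the head, so positive_infos stays None). It only sketches the contract
# enforced above; a real head needs conv layers, target assignment and
# post-processing logic.
class DummyMaskHead(BaseMaskHead):
    def __init__(self, init_cfg=None):
        super(DummyMaskHead, self).__init__(init_cfg)
    def forward(self, feats):
        # Must return a tuple even for a single output, as asserted in
        # forward_train above.
        return (feats[0], )
    def loss(self, mask_preds, **kwargs):
        # Placeholder zero loss; ground-truth kwargs are ignored in this sketch.
        return dict(loss_mask=mask_preds.sum() * 0.0)
    def get_results(self, mask_preds, img_metas, **kwargs):
        # Placeholder: one (empty) result per image.
        return [None for _ in img_metas]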
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from transformers import GemmaTokenizer, PaliGemmaProcessor
from transformers.testing_utils import get_tests_dir, require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import SiglipImageProcessor
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_vision
class PaliGemmaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = PaliGemmaProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = SiglipImageProcessor.from_pretrained("google/siglip-so400m-patch14-384")
image_processor.image_seq_length = 0 # TODO: raushan fix me in #37342
tokenizer = GemmaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>"]})
processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@require_torch
@require_vision
def test_image_seq_length(self):
input_str = "lower newer"
image_input = self.prepare_image_inputs()
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
image_processor.image_seq_length = 14
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
inputs = processor(
text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length"
)
self.assertEqual(len(inputs["input_ids"][0]), 112)
def test_text_with_image_tokens(self):
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
text_multi_images = "<image><image>Dummy text!"
text_single_image = "<image>Dummy text!"
text_no_image = "Dummy text!"
image = self.prepare_image_inputs()
out_noimage = processor(text=text_no_image, images=image, return_tensors="np")
out_singlimage = processor(text=text_single_image, images=image, return_tensors="np")
for k in out_noimage:
self.assertTrue(out_noimage[k].tolist() == out_singlimage[k].tolist())
out_multiimages = processor(text=text_multi_images, images=[image, image], return_tensors="np")
out_noimage = processor(text=text_no_image, images=[[image, image]], return_tensors="np")
        # We can't be sure of the user's intention: "one text + two images", or a forgotten second text
with self.assertRaises(ValueError):
out_noimage = processor(text=text_no_image, images=[image, image], return_tensors="np")
for k in out_noimage:
self.assertTrue(out_noimage[k].tolist() == out_multiimages[k].tolist())
text_batched = ["Dummy text!", "Dummy text!"]
text_batched_with_image = ["<image>Dummy text!", "<image>Dummy text!"]
out_images = processor(text=text_batched_with_image, images=[image, image], return_tensors="np")
out_noimage_nested = processor(text=text_batched, images=[[image], [image]], return_tensors="np")
out_noimage = processor(text=text_batched, images=[image, image], return_tensors="np")
for k in out_noimage:
self.assertTrue(out_noimage[k].tolist() == out_images[k].tolist() == out_noimage_nested[k].tolist())
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from transformers import GemmaTokenizer, PaliGemmaProcessor
from transformers.testing_utils import get_tests_dir, require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import SiglipImageProcessor
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_vision
class PaliGemmaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = PaliGemmaProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = SiglipImageProcessor.from_pretrained("google/siglip-so400m-patch14-384")
image_processor.image_seq_length = 0 # TODO: raushan fix me in #37342
tokenizer = GemmaTokenizer(SAMPLE_VOCAB, keep_accents=True)
processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@require_torch
@require_vision
def test_image_seq_length(self):
input_str = "lower newer"
image_input = self.prepare_image_inputs()
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
image_processor.image_seq_length = 14
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
inputs = processor(
text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length"
)
self.assertEqual(len(inputs["input_ids"][0]), 112 + 14)
def test_text_with_image_tokens(self):
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
text_multi_images = "<image><image>Dummy text!"
text_single_image = "<image>Dummy text!"
text_no_image = "Dummy text!"
image = self.prepare_image_inputs()
out_noimage = processor(text=text_no_image, images=image, return_tensors="np")
out_singlimage = processor(text=text_single_image, images=image, return_tensors="np")
for k in out_noimage:
self.assertTrue(out_noimage[k].tolist() == out_singlimage[k].tolist())
out_multiimages = processor(text=text_multi_images, images=[image, image], return_tensors="np")
out_noimage = processor(text=text_no_image, images=[[image, image]], return_tensors="np")
        # We can't be sure of the user's intention: "one text + two images", or a forgotten second text
with self.assertRaises(ValueError):
out_noimage = processor(text=text_no_image, images=[image, image], return_tensors="np")
for k in out_noimage:
self.assertTrue(out_noimage[k].tolist() == out_multiimages[k].tolist())
text_batched = ["Dummy text!", "Dummy text!"]
text_batched_with_image = ["<image>Dummy text!", "<image>Dummy text!"]
out_images = processor(text=text_batched_with_image, images=[image, image], return_tensors="np")
out_noimage_nested = processor(text=text_batched, images=[[image], [image]], return_tensors="np")
out_noimage = processor(text=text_batched, images=[image, image], return_tensors="np")
for k in out_noimage:
self.assertTrue(out_noimage[k].tolist() == out_images[k].tolist() == out_noimage_nested[k].tolist())
|
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
type='MaskRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
# model settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
model = dict(
type='MaskRCNN',
img_norm_cfg=img_norm_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
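# A usage sketch (hypothetical; assumes an mmdet installation whose MaskRCNN
# accepts this config layout): building the detector from the declarative
# `model` dict above. Config loaders exec this file with a module __name__,
# so the guard keeps the sketch inert during normal config loading.
if __name__ == '__main__':
    from mmdet.models import build_detector
    detector = build_detector(model)
    print(type(detector).__name__)  # expected: MaskRCNN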
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
397 categories with 108'754 images.
Args:
root (string): Root directory of the dataset.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
397 categories with 108'754 images.
Args:
root (string): Root directory of the dataset.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
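# A minimal usage sketch; note the SUN397 archive is very large (tens of GB), and
# the ToTensor transform is a common choice, not part of this module:
if __name__ == "__main__":
    from torchvision import transforms
    dataset = SUN397(root="./data", download=True, transform=transforms.ToTensor())
    image, label = dataset[0]
    print(len(dataset), dataset.classes[label], image.shape)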
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import pytest
import mmengine
def test_timer_init():
timer = mmengine.Timer(start=False)
assert not timer.is_running
timer.start()
assert timer.is_running
timer = mmengine.Timer()
assert timer.is_running
def test_timer_run():
timer = mmengine.Timer()
time.sleep(1)
assert abs(timer.since_start() - 1) < 1e-2
time.sleep(1)
assert abs(timer.since_last_check() - 1) < 1e-2
assert abs(timer.since_start() - 2) < 1e-2
timer = mmengine.Timer(False)
with pytest.raises(mmengine.TimerError):
timer.since_start()
with pytest.raises(mmengine.TimerError):
timer.since_last_check()
def test_timer_context(capsys):
with mmengine.Timer():
time.sleep(1)
out, _ = capsys.readouterr()
assert abs(float(out) - 1) < 1e-2
with mmengine.Timer(print_tmpl='time: {:.1f}s'):
time.sleep(1)
out, _ = capsys.readouterr()
assert out == 'time: 1.0s\n'
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import mmcv
import pytest
def test_timer_init():
timer = mmcv.Timer(start=False)
assert not timer.is_running
timer.start()
assert timer.is_running
timer = mmcv.Timer()
assert timer.is_running
def test_timer_run():
timer = mmcv.Timer()
time.sleep(1)
assert abs(timer.since_start() - 1) < 1e-2
time.sleep(1)
assert abs(timer.since_last_check() - 1) < 1e-2
assert abs(timer.since_start() - 2) < 1e-2
timer = mmcv.Timer(False)
with pytest.raises(mmcv.TimerError):
timer.since_start()
with pytest.raises(mmcv.TimerError):
timer.since_last_check()
def test_timer_context(capsys):
with mmcv.Timer():
time.sleep(1)
out, _ = capsys.readouterr()
assert abs(float(out) - 1) < 1e-2
with mmcv.Timer(print_tmpl='time: {:.1f}s'):
time.sleep(1)
out, _ = capsys.readouterr()
assert out == 'time: 1.0s\n'
|
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking.utils import send_request_sync
from jina_cli.api import executor_native, gateway
from tests.helper import _generate_pod_args
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w', encoding='utf-8') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = _generate_pod_args()
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
send_request_sync(_create_test_data_message(), target=f'{args.host}:{args.port[0]}')
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r', encoding='utf-8') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
|
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking.utils import send_request_sync
from jina_cli.api import executor_native, gateway
from tests.helper import _generate_pod_args
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = _generate_pod_args()
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
send_request_sync(_create_test_data_message(), target=f'{args.host}:{args.port[0]}')
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
|
from typing import Union
from fastapi import FastAPI
from pydantic import BaseModel
class Item(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
app = FastAPI()
@app.post("/items/")
async def create_item(item: Item):
item_dict = item.dict()
if item.tax is not None:
price_with_tax = item.price + item.tax
item_dict.update({"price_with_tax": price_with_tax})
return item_dict
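# Why `is not None` instead of a bare truthiness check: a tax of 0.0 is falsy,
# so `if item.tax:` would skip `price_with_tax` for zero-tax items. A minimal
# request sketch (illustrative values, assuming a running app):
#
#   POST /items/  {"name": "hat", "price": 10.0, "tax": 0.0}
#   -> {"name": "hat", "description": null, "price": 10.0, "tax": 0.0,
#       "price_with_tax": 10.0}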
|
from typing import Union
from fastapi import FastAPI
from pydantic import BaseModel
class Item(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
app = FastAPI()
@app.post("/items/")
async def create_item(item: Item):
item_dict = item.dict()
if item.tax:
price_with_tax = item.price + item.tax
item_dict.update({"price_with_tax": price_with_tax})
return item_dict
|
"""Chat Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
ChatMessage.model_rebuild()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
if isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
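# A minimal usage sketch (illustrative, not part of the module): chunks with
# matching roles concatenate their content; mismatched roles raise ValueError.
#
#   chunk = ChatMessageChunk(role="user", content="Hello ") + ChatMessageChunk(
#       role="user", content="world"
#   )
#   assert chunk.content == "Hello world"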
|
"""Chat Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
ChatMessage.model_rebuild()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
elif isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
else:
return super().__add__(other)
|
"""Test HyDE."""
from typing import Any, Optional
import numpy as np
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.hyde.prompts import PROMPT_MAP
class FakeEmbeddings(Embeddings):
"""Fake embedding class for tests."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return random floats."""
return [list(np.random.uniform(0, 1, 10)) for _ in range(10)]
def embed_query(self, text: str) -> list[float]:
"""Return random floats."""
return list(np.random.uniform(0, 1, 10))
class FakeLLM(BaseLLM):
"""Fake LLM wrapper for testing purposes."""
n: int = 1
def _generate(
self,
prompts: list[str],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
async def _agenerate(
self,
prompts: list[str],
stop: Optional[list[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def test_hyde_from_llm() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(),
FakeEmbeddings(),
key,
)
embedding.embed_query("foo")
def test_hyde_from_llm_with_multiple_n() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(n=8),
FakeEmbeddings(),
key,
)
embedding.embed_query("foo")
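# Background sketch (hedged): HypotheticalDocumentEmbedder prompts the LLM for
# hypothetical answer documents, embeds them, and combines the per-document
# vectors (averaging, in the default implementation), so FakeLLM(n=8)
# exercises the multi-generation path of embed_query.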
|
"""Test HyDE."""
from typing import Any, Optional
import numpy as np
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.hyde.prompts import PROMPT_MAP
class FakeEmbeddings(Embeddings):
"""Fake embedding class for tests."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return random floats."""
return [list(np.random.uniform(0, 1, 10)) for _ in range(10)]
def embed_query(self, text: str) -> list[float]:
"""Return random floats."""
return list(np.random.uniform(0, 1, 10))
class FakeLLM(BaseLLM):
"""Fake LLM wrapper for testing purposes."""
n: int = 1
def _generate(
self,
prompts: list[str],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
async def _agenerate(
self,
prompts: list[str],
stop: Optional[list[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def test_hyde_from_llm() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(), FakeEmbeddings(), key
)
embedding.embed_query("foo")
def test_hyde_from_llm_with_multiple_n() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(n=8), FakeEmbeddings(), key
)
embedding.embed_query("foo")
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
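# Note (hedged): mean=[103.530, 116.280, 123.675] with to_rgb=False is the
# BGR Caffe convention: images stay in OpenCV's BGR channel order and only
# the mean is subtracted (std=1.0), matching the detectron2 caffe-style
# ResNet-50 checkpoint loaded above.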
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
[
{
"role": "system",
"content": """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
},
{"role": "user", "content": "Write a brief birthday message to John"},
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
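# Usage sketch (hedged; paths are placeholders and the script name assumes
# this file is saved as example_chat_completion.py):
#
#   torchrun --nproc_per_node 1 example_chat_completion.py \
#       --ckpt_dir llama-2-7b-chat/ --tokenizer_path tokenizer.model \
#       --max_seq_len 512 --max_batch_size 8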
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 4,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
import os
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
sys.path.append("tests/python")
import test_basic_models as test_bm
# Don't import the test class, otherwise the tests will run twice.
import test_callback as test_cb # noqa
rng = np.random.RandomState(1994)
class TestGPUBasicModels:
cpu_test_cb = test_cb.TestCallbacks()
cpu_test_bm = test_bm.TestModels()
def run_cls(self, X, y):
cls = xgb.XGBClassifier(tree_method="hist", device="cuda")
cls.fit(X, y)
cls.get_booster().save_model("test_deterministic_gpu_hist-0.json")
cls = xgb.XGBClassifier(tree_method="hist", device="cuda")
cls.fit(X, y)
cls.get_booster().save_model("test_deterministic_gpu_hist-1.json")
with open("test_deterministic_gpu_hist-0.json", "r") as fd:
model_0 = fd.read()
with open("test_deterministic_gpu_hist-1.json", "r") as fd:
model_1 = fd.read()
os.remove("test_deterministic_gpu_hist-0.json")
os.remove("test_deterministic_gpu_hist-1.json")
return hash(model_0), hash(model_1)
def test_custom_objective(self):
self.cpu_test_bm.run_custom_objective("gpu_hist")
def test_eta_decay(self):
self.cpu_test_cb.run_eta_decay("gpu_hist")
@pytest.mark.parametrize(
"objective", ["binary:logistic", "reg:absoluteerror", "reg:quantileerror"]
)
def test_eta_decay_leaf_output(self, objective) -> None:
self.cpu_test_cb.run_eta_decay_leaf_output("gpu_hist", objective)
def test_deterministic_gpu_hist(self):
kRows = 1000
kCols = 64
kClasses = 4
# Create large values to force rounding.
X = np.random.randn(kRows, kCols) * 1e4
y = np.random.randint(0, kClasses, size=kRows)
model_0, model_1 = self.run_cls(X, y)
assert model_0 == model_1
@pytest.mark.skipif(**tm.no_sklearn())
def test_invalid_gpu_id(self):
from sklearn.datasets import load_digits
X, y = load_digits(return_X_y=True)
# should pass with invalid gpu id
cls1 = xgb.XGBClassifier(tree_method="gpu_hist", gpu_id=9999)
cls1.fit(X, y)
# should throw error with fail_on_invalid_gpu_id enabled
cls2 = xgb.XGBClassifier(
tree_method="gpu_hist", gpu_id=9999, fail_on_invalid_gpu_id=True
)
with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
cls2.fit(X, y)
cls2 = xgb.XGBClassifier(
tree_method="hist", device="cuda:9999", fail_on_invalid_gpu_id=True
)
with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
cls2.fit(X, y)
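# Design note (hedged): determinism is asserted by training twice on identical
# data, saving both boosters to JSON, and comparing hashes; any
# non-deterministic floating-point accumulation on the GPU would change leaf
# values and hence the serialized model text.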
|
import os
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
sys.path.append("tests/python")
import test_basic_models as test_bm
# Don't import the test class, otherwise the tests will run twice.
import test_callback as test_cb # noqa
rng = np.random.RandomState(1994)
class TestGPUBasicModels:
cpu_test_cb = test_cb.TestCallbacks()
cpu_test_bm = test_bm.TestModels()
def run_cls(self, X, y):
cls = xgb.XGBClassifier(tree_method='gpu_hist')
cls.fit(X, y)
cls.get_booster().save_model('test_deterministic_gpu_hist-0.json')
cls = xgb.XGBClassifier(tree_method='gpu_hist')
cls.fit(X, y)
cls.get_booster().save_model('test_deterministic_gpu_hist-1.json')
with open('test_deterministic_gpu_hist-0.json', 'r') as fd:
model_0 = fd.read()
with open('test_deterministic_gpu_hist-1.json', 'r') as fd:
model_1 = fd.read()
os.remove('test_deterministic_gpu_hist-0.json')
os.remove('test_deterministic_gpu_hist-1.json')
return hash(model_0), hash(model_1)
def test_custom_objective(self):
self.cpu_test_bm.run_custom_objective("gpu_hist")
def test_eta_decay(self):
self.cpu_test_cb.run_eta_decay('gpu_hist')
@pytest.mark.parametrize(
"objective", ["binary:logistic", "reg:absoluteerror", "reg:quantileerror"]
)
def test_eta_decay_leaf_output(self, objective) -> None:
self.cpu_test_cb.run_eta_decay_leaf_output("gpu_hist", objective)
def test_deterministic_gpu_hist(self):
kRows = 1000
kCols = 64
kClasses = 4
# Create large values to force rounding.
X = np.random.randn(kRows, kCols) * 1e4
y = np.random.randint(0, kClasses, size=kRows)
model_0, model_1 = self.run_cls(X, y)
assert model_0 == model_1
@pytest.mark.skipif(**tm.no_sklearn())
def test_invalid_gpu_id(self):
from sklearn.datasets import load_digits
X, y = load_digits(return_X_y=True)
# should pass with invalid gpu id
cls1 = xgb.XGBClassifier(tree_method="gpu_hist", gpu_id=9999)
cls1.fit(X, y)
# should throw error with fail_on_invalid_gpu_id enabled
cls2 = xgb.XGBClassifier(
tree_method="gpu_hist", gpu_id=9999, fail_on_invalid_gpu_id=True
)
with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
cls2.fit(X, y)
cls2 = xgb.XGBClassifier(
tree_method="hist", device="cuda:9999", fail_on_invalid_gpu_id=True
)
with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
cls2.fit(X, y)
|
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from rich.console import Console, ConsoleOptions, RenderResult
from rich.measure import Measurement
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class TensorDisplay:
"""
Rich representation of a tensor.
"""
tensor_min_width: int = 30
def __init__(self, tensor: 'AbstractTensor'):
self.tensor = tensor
def __rich_console__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'RenderResult':
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < 200:
import colorsys
from rich.color import Color
from rich.segment import Segment
from rich.style import Style
tensor_normalized = comp_be.minmax_normalize(t_squeezed, (0, 5))
hue = 0.75
saturation = 1.0
for idx, y in enumerate(tensor_normalized):
luminance = 0.1 + ((y / 5) * 0.7)
r, g, b = colorsys.hls_to_rgb(hue, luminance + 0.07, saturation)
color = Color.from_rgb(r * 255, g * 255, b * 255)
yield Segment('▄', Style(color=color, bgcolor=color))
if idx != 0 and idx % options.max_width == 0:
yield Segment.line()
else:
from rich.text import Text
yield Text(
f'{self.tensor.__class__.__name__} of '
f'shape {comp_be.shape(self.tensor)}, '
f'dtype: {str(comp_be.dtype(self.tensor))}'
)
def __rich_measure__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'Measurement':
from rich.measure import Measurement
width = self._compute_table_width(max_width=options.max_width)
return Measurement(1, width)
def _compute_table_width(self, max_width: int) -> int:
"""
        Compute the width of the table. Depending on the length of the tensor,
        the width lies between the minimum of 30 and the given `max_width`.
:return: the width of the table
"""
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < max_width:
min_capped = max(comp_be.shape(t_squeezed)[0], self.tensor_min_width)
min_max_capped = min(min_capped, max_width)
return min_max_capped
else:
return max_width
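# Rendering sketch (hedged): each value is min-max normalized into [0, 5] and
# mapped to a luminance of roughly 0.1-0.8, then drawn as one '▄' cell with
# equal foreground/background color, so a short 1-D tensor appears as a
# horizontal heat strip that wraps at the console width.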
|
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from rich.console import Console, ConsoleOptions, RenderResult
from rich.measure import Measurement
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class TensorDisplay:
"""
Rich representation of a tensor.
"""
tensor_min_width: int = 30
def __init__(self, tensor: 'AbstractTensor'):
self.tensor = tensor
def __rich_console__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'RenderResult':
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < 200:
import colorsys
from rich.color import Color
from rich.segment import Segment
from rich.style import Style
tensor_normalized = comp_be.minmax_normalize(
comp_be.detach(self.tensor), (0, 5)
)
hue = 0.75
saturation = 1.0
for idx, y in enumerate(tensor_normalized):
luminance = 0.1 + ((y / 5) * 0.7)
r, g, b = colorsys.hls_to_rgb(hue, luminance + 0.07, saturation)
color = Color.from_rgb(r * 255, g * 255, b * 255)
yield Segment('▄', Style(color=color, bgcolor=color))
if idx != 0 and idx % options.max_width == 0:
yield Segment.line()
else:
from rich.text import Text
yield Text(
f'{self.tensor.__class__.__name__} of '
f'shape {comp_be.shape(self.tensor)}, '
f'dtype: {str(comp_be.dtype(self.tensor))}'
)
def __rich_measure__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'Measurement':
from rich.measure import Measurement
width = self._compute_table_width(max_width=options.max_width)
return Measurement(1, width)
def _compute_table_width(self, max_width: int) -> int:
"""
        Compute the width of the table. Depending on the length of the tensor,
        the width lies between the minimum of 30 and the given `max_width`.
:return: the width of the table
"""
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < max_width:
min_capped = max(comp_be.shape(t_squeezed)[0], self.tensor_min_width)
min_max_capped = min(min_capped, max_width)
return min_max_capped
else:
return max_width
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size=(416, 416), pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
resume_from = None
interval = 10
# Execute in the order of insertion when the priority is the same.
# The smaller the value, the higher the priority
custom_hooks = [
dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
dict(
type='SyncRandomSizeHook',
ratio_range=(10, 20),
img_scale=img_scale,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=15,
interval=interval,
priority=48),
dict(type='ExpMomentumEMAHook', resume_from=resume_from, priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(interval=interval, metric='bbox')
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size=(416, 416), pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
resume_from = None
interval = 10
# Execute in the order of insertion when the priority is the same.
# The smaller the value, the higher the priority
custom_hooks = [
dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
dict(
type='SyncRandomSizeHook',
ratio_range=(10, 20),
img_scale=img_scale,
interval=interval,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=15,
interval=interval,
priority=48),
dict(type='ExpMomentumEMAHook', resume_from=resume_from, priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(interval=interval, metric='bbox')
|
from .dpr_reader import DPRReaderRanker
|
from .dpr_reader import DPRReaderRanker
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
        :param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting it
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, it will unset
        these proxy variables before starting. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._set_single_port_protocol()
self.host = self.runtime_args.host
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
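# Design note (hedged): splitting uvicorn's Server.run() into setup() and
# serve() lets the gateway bind sockets and load the app inside setup_server()
# while deferring main_loop() to run_server(), so startup failures surface
# before the runtime reports readiness.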
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
        :param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting it
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, it will unset
        these proxy variables before starting. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._set_single_port_protocol()
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Literal
from pydantic import BaseModel, ConfigDict, SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
CredentialsField,
CredentialsMetaInput,
SchemaField,
UserPasswordCredentials,
)
from backend.integrations.providers import ProviderName
TEST_CREDENTIALS = UserPasswordCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="smtp",
username=SecretStr("mock-smtp-username"),
password=SecretStr("mock-smtp-password"),
title="Mock SMTP credentials",
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
SMTPCredentials = UserPasswordCredentials
SMTPCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.SMTP],
Literal["user_password"],
]
def SMTPCredentialsField() -> SMTPCredentialsInput:
return CredentialsField(
description="The SMTP integration requires a username and password.",
)
class SMTPConfig(BaseModel):
smtp_server: str = SchemaField(
default="smtp.example.com", description="SMTP server address"
)
smtp_port: int = SchemaField(default=25, description="SMTP port number")
model_config = ConfigDict(title="SMTP Config")
class SendEmailBlock(Block):
class Input(BlockSchema):
to_email: str = SchemaField(
description="Recipient email address", placeholder="[email protected]"
)
subject: str = SchemaField(
description="Subject of the email", placeholder="Enter the email subject"
)
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
config: SMTPConfig = SchemaField(
description="SMTP Config",
default=SMTPConfig(),
)
credentials: SMTPCredentialsInput = SMTPCredentialsField()
class Output(BlockSchema):
status: str = SchemaField(description="Status of the email sending operation")
error: str = SchemaField(
description="Error message if the email sending failed"
)
def __init__(self):
super().__init__(
id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
input_schema=SendEmailBlock.Input,
output_schema=SendEmailBlock.Output,
test_input={
"to_email": "[email protected]",
"subject": "Test Email",
"body": "This is a test email.",
"config": {
"smtp_server": "smtp.gmail.com",
"smtp_port": 25,
},
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[("status", "Email sent successfully")],
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
)
@staticmethod
def send_email(
config: SMTPConfig,
to_email: str,
subject: str,
body: str,
credentials: SMTPCredentials,
) -> str:
smtp_server = config.smtp_server
smtp_port = config.smtp_port
smtp_username = credentials.username.get_secret_value()
smtp_password = credentials.password.get_secret_value()
msg = MIMEMultipart()
msg["From"] = smtp_username
msg["To"] = to_email
msg["Subject"] = subject
msg.attach(MIMEText(body, "plain"))
with smtplib.SMTP(smtp_server, smtp_port) as server:
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(smtp_username, to_email, msg.as_string())
return "Email sent successfully"
def run(
self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
) -> BlockOutput:
yield "status", self.send_email(
config=input_data.config,
to_email=input_data.to_email,
subject=input_data.subject,
body=input_data.body,
credentials=credentials,
)
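# A minimal standalone sketch of the same SMTP flow (hedged: the server
# address, port, and credentials below are placeholders, not values from this
# module):
#
#   import smtplib
#   from email.mime.text import MIMEText
#
#   msg = MIMEText("This is a test email.", "plain")
#   msg["From"], msg["To"], msg["Subject"] = "me@example.com", "you@example.com", "Test"
#   with smtplib.SMTP("smtp.example.com", 587) as server:
#       server.starttls()
#       server.login("me@example.com", "app-password")
#       server.sendmail("me@example.com", ["you@example.com"], msg.as_string())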
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import BlockSecret, SchemaField, SecretField
class EmailCredentials(BaseModel):
smtp_server: str = SchemaField(
default="smtp.gmail.com", description="SMTP server address"
)
smtp_port: int = SchemaField(default=25, description="SMTP port number")
smtp_username: BlockSecret = SecretField(key="smtp_username")
smtp_password: BlockSecret = SecretField(key="smtp_password")
model_config = ConfigDict(title="Email Credentials")
class SendEmailBlock(Block):
class Input(BlockSchema):
to_email: str = SchemaField(
description="Recipient email address", placeholder="[email protected]"
)
subject: str = SchemaField(
description="Subject of the email", placeholder="Enter the email subject"
)
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
creds: EmailCredentials = SchemaField(
description="SMTP credentials",
default=EmailCredentials(),
)
class Output(BlockSchema):
status: str = SchemaField(description="Status of the email sending operation")
error: str = SchemaField(
description="Error message if the email sending failed"
)
def __init__(self):
super().__init__(
disabled=True,
id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
input_schema=SendEmailBlock.Input,
output_schema=SendEmailBlock.Output,
test_input={
"to_email": "[email protected]",
"subject": "Test Email",
"body": "This is a test email.",
"creds": {
"smtp_server": "smtp.gmail.com",
"smtp_port": 25,
"smtp_username": "[email protected]",
"smtp_password": "your-gmail-password",
},
},
test_output=[("status", "Email sent successfully")],
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
)
@staticmethod
def send_email(
creds: EmailCredentials, to_email: str, subject: str, body: str
) -> str:
smtp_server = creds.smtp_server
smtp_port = creds.smtp_port
smtp_username = creds.smtp_username.get_secret_value()
smtp_password = creds.smtp_password.get_secret_value()
msg = MIMEMultipart()
msg["From"] = smtp_username
msg["To"] = to_email
msg["Subject"] = subject
msg.attach(MIMEText(body, "plain"))
with smtplib.SMTP(smtp_server, smtp_port) as server:
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(smtp_username, to_email, msg.as_string())
return "Email sent successfully"
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "status", self.send_email(
input_data.creds,
input_data.to_email,
input_data.subject,
input_data.body,
)
|
"""
Demo for prediction using individual trees and model slices
===========================================================
"""
import os
import numpy as np
from scipy.special import logit
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.join(CURRENT_DIR, "../data/agaricus.txt.train")
test = os.path.join(CURRENT_DIR, "../data/agaricus.txt.test")
def individual_tree() -> None:
"""Get prediction from each individual tree and combine them together."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
    # Use logit to invert the base score back to raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i in range(n_rounds):
# - Use output_margin to get raw leaf values
# - Use iteration_range to get prediction for only one tree
        # - Use previous prediction as base margin for the model
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=False
)
else:
# get raw leaf value for accumulation
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=True
)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
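# Note on the margin bookkeeping above: logit(0.5) == 0.0, so with
# base_score=0.5 the accumulated margin starts at zero; each round adds one
# tree's raw leaf values, and only the final round applies the sigmoid
# (output_margin=False), matching the full-model prediction.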
def model_slices() -> None:
"""Inference with each individual tree using model slices."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
trees = [booster[t] for t in range(n_rounds)]
    # Use logit to invert the base score back to raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i, t in enumerate(trees):
# Feed previous scores into base margin.
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = t.predict(Xy_test, output_margin=False)
else:
# get raw leaf value for accumulation
scores = t.predict(Xy_test, output_margin=True)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
if __name__ == "__main__":
individual_tree()
model_slices()
|
"""
Demo for prediction using individual trees and model slices
===========================================================
"""
import os
import numpy as np
from scipy.special import logit
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.join(CURRENT_DIR, "../data/agaricus.txt.train")
test = os.path.join(CURRENT_DIR, "../data/agaricus.txt.test")
def individual_tree() -> None:
"""Get prediction from each individual tree and combine them together."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
    # Use logit to invert the base score back to raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i in range(n_rounds):
# - Use output_margin to get raw leaf values
# - Use iteration_range to get prediction for only one tree
        # - Use previous prediction as base margin for the model
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=False
)
else:
# get raw leaf value for accumulation
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=True
)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
def model_slices() -> None:
"""Inference with each individual using model slices."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
trees = [booster[t] for t in range(n_rounds)]
    # Use logit to invert the base score back to raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i, t in enumerate(trees):
# Feed previous scores into base margin.
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = t.predict(Xy_test, output_margin=False)
else:
# get raw leaf value for accumulation
scores = t.predict(Xy_test, output_margin=True)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
if __name__ == "__main__":
individual_tree()
model_slices()
|
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.docstore.azurecosmosnosql import AzureCosmosNoSqlDocumentStore
def test_class():
names_of_base_classes = [b.__name__ for b in AzureCosmosNoSqlDocumentStore.__mro__]
assert BaseKVStore.__name__ in names_of_base_classes
|
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.storage.docstore.azurecosmosnosql import AzureCosmosNoSqlDocumentStore
def test_class():
names_of_base_classes = [b.__name__ for b in AzureCosmosNoSqlDocumentStore.__mro__]
assert KVDocumentStore.__name__ in names_of_base_classes
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction
def mixin_base_runtime_parser(arg_group):
"""Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
"""
arg_group.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
help='The port for input data to bind to, default a random port between [49152, 65535]',
)
arg_group.add_argument(
'--host-in',
type=str,
default=__default_host__,
        help=f'The host address to bind to; by default it is {__default_host__}',
)
arg_group.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
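# Illustrative usage (only argparse is added; the group title and port value
# are hypothetical):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   arg_group = parser.add_argument_group('runtime')
#   mixin_base_runtime_parser(arg_group)
#   args = parser.parse_args(['--port-in', '12345'])
#   assert args.port == 12345  # dest='port', despite the --port-in flag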
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_base_runtime_parser(arg_group):
"""Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
"""
arg_group.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
help='The port for input data to bind to, default a random port between [49152, 65535]',
)
arg_group.add_argument(
'--host-in',
type=str,
default=__default_host__,
        help=f'The host address to bind to; by default it is {__default_host__}',
)
arg_group.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=3,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (3 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=3,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
from ...core import bbox_cxcywh_to_xyxy
@MODELS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
Unlike traditional RPNHead, this module does not need FPN input, but just
decode `init_proposal_bboxes` and expand the first dimension of
`init_proposal_bboxes` and `init_proposal_features` to the batch_size.
Args:
num_proposals (int): Number of init_proposals. Default 100.
proposal_feature_channel (int): Channel number of
init_proposal_feature. Defaults to 256.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_proposals=100,
proposal_feature_channel=256,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(EmbeddingRPNHead, self).__init__(init_cfg)
self.num_proposals = num_proposals
self.proposal_feature_channel = proposal_feature_channel
self._init_layers()
def _init_layers(self):
"""Initialize a sparse set of proposal boxes and proposal features."""
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(
self.num_proposals, self.proposal_feature_channel)
def init_weights(self):
"""Initialize the init_proposal_bboxes as normalized.
[c_x, c_y, w, h], and we initialize it to the size of the entire
image.
"""
super(EmbeddingRPNHead, self).init_weights()
nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
def _decode_init_proposals(self, imgs, img_metas):
"""Decode init_proposal_bboxes according to the size of images and
expand dimension of init_proposal_features to batch_size.
Args:
imgs (list[Tensor]): List of FPN features.
img_metas (list[dict]): List of meta-information of
images. Need the img_shape to decode the init_proposals.
Returns:
Tuple(Tensor):
- proposals (Tensor): Decoded proposal bboxes,
has shape (batch_size, num_proposals, 4).
- init_proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel).
                - imgs_whwh (Tensor): Tensor with shape
                  (batch_size, 1, 4), where the last dimension means
                  [img_width, img_height, img_width, img_height].
"""
proposals = self.init_proposal_bboxes.weight.clone()
proposals = bbox_cxcywh_to_xyxy(proposals)
        num_imgs = len(imgs[0])  # len() of a (B, C, H, W) tensor gives B
imgs_whwh = []
for meta in img_metas:
h, w, _ = meta['img_shape']
imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
imgs_whwh = torch.cat(imgs_whwh, dim=0)
imgs_whwh = imgs_whwh[:, None, :]
# imgs_whwh has shape (batch_size, 1, 4)
        # The shape of proposals changes from (num_proposals, 4)
        # to (batch_size, num_proposals, 4).
proposals = proposals * imgs_whwh
init_proposal_features = self.init_proposal_features.weight.clone()
init_proposal_features = init_proposal_features[None].expand(
num_imgs, *init_proposal_features.size())
return proposals, init_proposal_features, imgs_whwh
def forward_dummy(self, img, img_metas):
"""Dummy forward function.
Used in flops calculation.
"""
return self._decode_init_proposals(img, img_metas)
def forward_train(self, img, img_metas):
"""Forward function in training stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test_rpn(self, img, img_metas):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test(self, img, img_metas):
"""Forward function in testing stage."""
raise NotImplementedError
def aug_test_rpn(self, feats, img_metas):
raise NotImplementedError(
'EmbeddingRPNHead does not support test-time augmentation')
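# Minimal usage sketch (illustrative only, assumes mmcv/mmdet and their
# dependencies are installed): after init_weights(), every decoded proposal
# spans the whole image, and the returned shapes match the docstring of
# _decode_init_proposals.
if __name__ == '__main__':
    head = EmbeddingRPNHead(num_proposals=100, proposal_feature_channel=256)
    head.init_weights()

    batch_size = 2
    feats = [torch.randn(batch_size, 256, 64, 64)]  # one FPN level suffices
    img_metas = [dict(img_shape=(511, 511, 3)) for _ in range(batch_size)]

    proposals, proposal_feats, imgs_whwh = head.simple_test_rpn(
        feats, img_metas)
    assert proposals.shape == (batch_size, 100, 4)        # xyxy, in pixels
    assert proposal_feats.shape == (batch_size, 100, 256)
    assert imgs_whwh.shape == (batch_size, 1, 4)          # [w, h, w, h]
    # Each row of proposals is (0, 0, 511, 511) right after init_weights().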
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS
from ...core import bbox_cxcywh_to_xyxy
@HEADS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
    Unlike traditional RPNHead, this module does not need FPN inputs, but
    just decodes `init_proposal_bboxes` and expands the first dimension of
    `init_proposal_bboxes` and `init_proposal_features` to the batch_size.
Args:
        num_proposals (int): Number of init_proposals. Defaults to 100.
proposal_feature_channel (int): Channel number of
init_proposal_feature. Defaults to 256.
init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
"""
def __init__(self,
num_proposals=100,
proposal_feature_channel=256,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(EmbeddingRPNHead, self).__init__(init_cfg)
self.num_proposals = num_proposals
self.proposal_feature_channel = proposal_feature_channel
self._init_layers()
def _init_layers(self):
"""Initialize a sparse set of proposal boxes and proposal features."""
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(
self.num_proposals, self.proposal_feature_channel)
def init_weights(self):
"""Initialize the init_proposal_bboxes as normalized.
[c_x, c_y, w, h], and we initialize it to the size of the entire
image.
"""
super(EmbeddingRPNHead, self).init_weights()
nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
def _decode_init_proposals(self, imgs, img_metas):
"""Decode init_proposal_bboxes according to the size of images and
expand dimension of init_proposal_features to batch_size.
Args:
imgs (list[Tensor]): List of FPN features.
img_metas (list[dict]): List of meta-information of
images. Need the img_shape to decode the init_proposals.
Returns:
Tuple(Tensor):
- proposals (Tensor): Decoded proposal bboxes,
has shape (batch_size, num_proposals, 4).
- init_proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel).
                - imgs_whwh (Tensor): Tensor with shape
                  (batch_size, 1, 4), where the last dimension means
                  [img_width, img_height, img_width, img_height].
"""
proposals = self.init_proposal_bboxes.weight.clone()
proposals = bbox_cxcywh_to_xyxy(proposals)
        num_imgs = len(imgs[0])  # len() of a (B, C, H, W) tensor gives B
imgs_whwh = []
for meta in img_metas:
h, w, _ = meta['img_shape']
imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
imgs_whwh = torch.cat(imgs_whwh, dim=0)
imgs_whwh = imgs_whwh[:, None, :]
# imgs_whwh has shape (batch_size, 1, 4)
        # The shape of proposals changes from (num_proposals, 4)
        # to (batch_size, num_proposals, 4).
proposals = proposals * imgs_whwh
init_proposal_features = self.init_proposal_features.weight.clone()
init_proposal_features = init_proposal_features[None].expand(
num_imgs, *init_proposal_features.size())
return proposals, init_proposal_features, imgs_whwh
def forward_dummy(self, img, img_metas):
"""Dummy forward function.
Used in flops calculation.
"""
return self._decode_init_proposals(img, img_metas)
def forward_train(self, img, img_metas):
"""Forward function in training stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test_rpn(self, img, img_metas):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test(self, img, img_metas):
"""Forward function in testing stage."""
raise NotImplementedError
def aug_test_rpn(self, feats, img_metas):
raise NotImplementedError(
'EmbeddingRPNHead does not support test-time augmentation')
|