input (string, 33–5k chars) | output (string, 32–5k chars)
---|---
import os
import pytest
from langchain_core.outputs import GenerationChunk
from langchain_openai import OpenAI
from langchain_openai.llms.base import _stream_response_to_generation_chunk
os.environ["OPENAI_API_KEY"] = "foo"
def test_openai_model_param() -> None:
llm = OpenAI(model="foo")
assert llm.model_name == "foo"
llm = OpenAI(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
# Test standard tracing params
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "openai",
"ls_model_type": "llm",
"ls_model_name": "foo",
"ls_temperature": 0.7,
"ls_max_tokens": 256,
}
def test_openai_model_kwargs() -> None:
llm = OpenAI(model_kwargs={"foo": "bar"})
assert llm.model_kwargs == {"foo": "bar"}
def test_openai_fields_in_model_kwargs() -> None:
"""Test that for backwards compatibility fields can be passed in as model_kwargs."""
llm = OpenAI(model_kwargs={"model_name": "foo"})
assert llm.model_name == "foo"
llm = OpenAI(model_kwargs={"model": "foo"})
assert llm.model_name == "foo"
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAI(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
"object": "text_completion",
"created": 1689989000,
"model": "text-davinci-003",
"choices": [
{"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
],
"usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
}
@pytest.mark.parametrize("model", ["gpt-3.5-turbo-instruct"])
def test_get_token_ids(model: str) -> None:
OpenAI(model=model).get_token_ids("foo")
def test_custom_token_counting() -> None:
def token_encoder(text: str) -> list[int]:
return [1, 2, 3]
llm = OpenAI(custom_get_token_ids=token_encoder)
assert llm.get_token_ids("foo") == [1, 2, 3]
def test_stream_response_to_generation_chunk() -> None:
completion = {
"id": "cmpl-abc123",
"choices": [
{"finish_reason": None, "index": 0, "logprobs": None, "text": "foo"}
],
"created": 1749214401,
"model": "my-model",
"object": "text_completion",
"system_fingerprint": None,
"usage": None,
}
chunk = _stream_response_to_generation_chunk(completion)
assert chunk == GenerationChunk(
text="foo", generation_info={"finish_reason": None, "logprobs": None}
)
# Pathological completion with None text (e.g., from other providers)
completion = {
"id": "cmpl-abc123",
"choices": [
{"finish_reason": None, "index": 0, "logprobs": None, "text": None}
],
"created": 1749214401,
"model": "my-model",
"object": "text_completion",
"system_fingerprint": None,
"usage": None,
}
chunk = _stream_response_to_generation_chunk(completion)
assert chunk == GenerationChunk(
text="", generation_info={"finish_reason": None, "logprobs": None}
)
|
import os
import pytest
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = "foo"
def test_openai_model_param() -> None:
llm = OpenAI(model="foo")
assert llm.model_name == "foo"
llm = OpenAI(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
# Test standard tracing params
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "openai",
"ls_model_type": "llm",
"ls_model_name": "foo",
"ls_temperature": 0.7,
"ls_max_tokens": 256,
}
def test_openai_model_kwargs() -> None:
llm = OpenAI(model_kwargs={"foo": "bar"})
assert llm.model_kwargs == {"foo": "bar"}
def test_openai_fields_in_model_kwargs() -> None:
"""Test that for backwards compatibility fields can be passed in as model_kwargs."""
llm = OpenAI(model_kwargs={"model_name": "foo"})
assert llm.model_name == "foo"
llm = OpenAI(model_kwargs={"model": "foo"})
assert llm.model_name == "foo"
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAI(foo="bar") # type: ignore[call-arg]
assert llm.model_kwargs == {"foo": "bar"}
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
"object": "text_completion",
"created": 1689989000,
"model": "text-davinci-003",
"choices": [
{"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
],
"usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
}
@pytest.mark.parametrize("model", ["gpt-3.5-turbo-instruct"])
def test_get_token_ids(model: str) -> None:
OpenAI(model=model).get_token_ids("foo")
def test_custom_token_counting() -> None:
def token_encoder(text: str) -> list[int]:
return [1, 2, 3]
llm = OpenAI(custom_get_token_ids=token_encoder)
assert llm.get_token_ids("foo") == [1, 2, 3]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine import MMLogger
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.registry import init_default_scope
from mmengine.utils import mkdir_or_exist
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
InferenceBenchmark)
def parse_args():
parser = argparse.ArgumentParser(description='MMDet benchmark')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument(
'--task',
choices=['inference', 'dataloader', 'dataset'],
default='dataloader',
        help='which task to benchmark')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
        help='number of times to repeat the measurement for averaging the results')
parser.add_argument(
        '--max-iter', type=int, default=2000, help='maximum number of iterations')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
        '--num-warmup', type=int, default=5, help='number of warmup iterations')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
        'the inference speed')
parser.add_argument(
'--dataset-type',
choices=['train', 'val', 'test'],
default='test',
        help='Benchmark dataset type. Only supports train, val and test')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing '
'benchmark metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
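# Example invocation (config, checkpoint and cfg-options values below are
# hypothetical, for illustration only):
#   python benchmark.py configs/retinanet_r50_fpn_1x_coco.py \
#       --checkpoint work_dirs/retinanet/latest.pth --task inference \
#       --cfg-options test_dataloader.batch_size=1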
def inference_benchmark(args, cfg, distributed, logger):
benchmark = InferenceBenchmark(
cfg,
args.checkpoint,
distributed,
args.fuse_conv_bn,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataloader_benchmark(args, cfg, distributed, logger):
benchmark = DataLoaderBenchmark(
cfg,
distributed,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataset_benchmark(args, cfg, distributed, logger):
benchmark = DatasetBenchmark(
cfg,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmdet'))
distributed = False
if args.launcher != 'none':
init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))
distributed = True
log_file = None
if args.work_dir:
log_file = os.path.join(args.work_dir, 'benchmark.log')
mkdir_or_exist(args.work_dir)
logger = MMLogger.get_instance(
'mmdet', log_file=log_file, log_level='INFO')
    # explicit dispatch instead of eval() on user-supplied input
    task_benchmarks = dict(inference=inference_benchmark, dataloader=dataloader_benchmark, dataset=dataset_benchmark)
    benchmark = task_benchmarks[args.task](args, cfg, distributed, logger)
benchmark.run(args.repeat_num)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine import MMLogger
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.utils import mkdir_or_exist
from mmdet.utils import register_all_modules
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
InferenceBenchmark)
def parse_args():
parser = argparse.ArgumentParser(description='MMDet benchmark')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument(
'--task',
choices=['inference', 'dataloader', 'dataset'],
default='dataloader',
        help='which task to benchmark')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
        help='number of times to repeat the measurement for averaging the results')
parser.add_argument(
        '--max-iter', type=int, default=2000, help='maximum number of iterations')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
        '--num-warmup', type=int, default=5, help='number of warmup iterations')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
        'the inference speed')
parser.add_argument(
'--dataset-type',
choices=['train', 'val', 'test'],
default='test',
        help='Benchmark dataset type. Only supports train, val and test')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing '
'benchmark metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def inference_benchmark(args, cfg, distributed, logger):
benchmark = InferenceBenchmark(
cfg,
args.checkpoint,
distributed,
args.fuse_conv_bn,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataloader_benchmark(args, cfg, distributed, logger):
benchmark = DataLoaderBenchmark(
cfg,
distributed,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataset_benchmark(args, cfg, distributed, logger):
benchmark = DatasetBenchmark(
cfg,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def main():
register_all_modules()
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
distributed = False
if args.launcher != 'none':
init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))
distributed = True
log_file = None
if args.work_dir:
log_file = os.path.join(args.work_dir, 'benchmark.log')
mkdir_or_exist(args.work_dir)
logger = MMLogger.get_instance(
'mmdet', log_file=log_file, log_level='INFO')
    # explicit dispatch instead of eval() on user-supplied input
    task_benchmarks = dict(inference=inference_benchmark, dataloader=dataloader_benchmark, dataset=dataset_benchmark)
    benchmark = task_benchmarks[args.task](args, cfg, distributed, logger)
benchmark.run(args.repeat_num)
if __name__ == '__main__':
main()
|
from __future__ import annotations
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
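                (With the default margin of 5, the loss is zero once
                ``||anchor - negative|| >= ||anchor - positive|| + 5``.)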
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SparseTripletLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
|
from __future__ import annotations
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SparseTripletLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
from collections.abc import Generator
import pytest
from langchain_core.vectorstores import VectorStore
from langchain_tests.integration_tests.vectorstores import VectorStoreIntegrationTests
from langchain_chroma import Chroma
class TestChromaStandard(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
"""Get an empty vectorstore for unit tests."""
store = Chroma(embedding_function=self.get_embeddings())
try:
yield store
finally:
store.delete_collection()
|
from typing import Generator
import pytest
from langchain_core.vectorstores import VectorStore
from langchain_tests.integration_tests.vectorstores import VectorStoreIntegrationTests
from langchain_chroma import Chroma
class TestChromaStandard(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
"""Get an empty vectorstore for unit tests."""
store = Chroma(embedding_function=self.get_embeddings())
try:
yield store
finally:
store.delete_collection()
|
import gc
import unittest
import torch
from diffusers import (
StableDiffusionXLPipeline,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import SDXLSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionXLPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
pipeline_class = StableDiffusionXLPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
original_config = (
"https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
)
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"generator": generator,
"num_inference_steps": 2,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
|
import gc
import unittest
import torch
from diffusers import (
StableDiffusionXLPipeline,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
)
from .single_file_testing_utils import SDXLSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionXLPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
pipeline_class = StableDiffusionXLPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
original_config = (
"https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
)
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"generator": generator,
"num_inference_steps": 2,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
        It expects that each of the InputExamples consists of a pair of texts and a float-valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseAnglELoss(model), document_regularizer_weight=5e-5, use_document_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAnglELoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
        It expects that each of the InputExamples consists of a pair of texts and a float-valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseAnglELoss(model), corpus_regularizer_weight=5e-5, use_corpus_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAnglELoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
from .WhitespaceTokenizer import WhitespaceTokenizer
from .PhraseTokenizer import PhraseTokenizer
|
import pytest
from llama_index.llms.bedrock_converse.utils import get_model_name
from io import BytesIO
from unittest.mock import MagicMock, patch
from llama_index.core.base.llms.types import (
AudioBlock,
ImageBlock,
MessageRole,
TextBlock,
)
from llama_index.llms.bedrock_converse.utils import (
__get_img_format_from_image_mimetype,
_content_block_to_bedrock_format,
)
def test_get_model_name_translates_us():
assert (
get_model_name("us.meta.llama3-2-3b-instruct-v1:0")
== "meta.llama3-2-3b-instruct-v1:0"
)
def test_get_model_name_does_not_translate_cn():
assert (
get_model_name("cn.meta.llama3-2-3b-instruct-v1:0")
== "cn.meta.llama3-2-3b-instruct-v1:0"
)
def test_get_model_name_does_not_translate_unsupported():
assert get_model_name("cohere.command-r-plus-v1:0") == "cohere.command-r-plus-v1:0"
def test_get_model_name_throws_inference_profile_exception():
with pytest.raises(ValueError):
        get_model_name("us.cohere.command-r-plus-v1:0")
def test_get_img_format_jpeg():
assert __get_img_format_from_image_mimetype("image/jpeg") == "jpeg"
def test_get_img_format_png():
assert __get_img_format_from_image_mimetype("image/png") == "png"
def test_get_img_format_gif():
assert __get_img_format_from_image_mimetype("image/gif") == "gif"
def test_get_img_format_webp():
assert __get_img_format_from_image_mimetype("image/webp") == "webp"
def test_get_img_format_unsupported(caplog):
result = __get_img_format_from_image_mimetype("image/unsupported")
assert result == "png"
assert "Unsupported image mimetype" in caplog.text
def test_content_block_to_bedrock_format_text():
text_block = TextBlock(text="Hello, world!")
result = _content_block_to_bedrock_format(text_block, MessageRole.USER)
assert result == {"text": "Hello, world!"}
@patch("llama_index.core.base.llms.types.ImageBlock.resolve_image")
def test_content_block_to_bedrock_format_image_user(mock_resolve):
mock_bytes = BytesIO(b"fake_image_data")
mock_bytes.read = MagicMock(return_value=b"fake_image_data")
mock_resolve.return_value = mock_bytes
image_block = ImageBlock(image=b"", image_mimetype="image/png")
result = _content_block_to_bedrock_format(image_block, MessageRole.USER)
assert "image" in result
assert result["image"]["format"] == "png"
assert "bytes" in result["image"]["source"]
mock_resolve.assert_called_once()
@patch("llama_index.core.base.llms.types.ImageBlock.resolve_image")
def test_content_block_to_bedrock_format_image_assistant(mock_resolve, caplog):
image_block = ImageBlock(image=b"", image_mimetype="image/png")
result = _content_block_to_bedrock_format(image_block, MessageRole.ASSISTANT)
assert result is None
assert "only supports image blocks for user messages" in caplog.text
mock_resolve.assert_not_called()
def test_content_block_to_bedrock_format_audio(caplog):
audio_block = AudioBlock(audio=b"test_audio")
result = _content_block_to_bedrock_format(audio_block, MessageRole.USER)
assert result is None
assert "Audio blocks are not supported" in caplog.text
def test_content_block_to_bedrock_format_unsupported(caplog):
unsupported_block = object()
result = _content_block_to_bedrock_format(unsupported_block, MessageRole.USER)
assert result is None
assert "Unsupported block type" in caplog.text
assert str(type(unsupported_block)) in caplog.text
|
import pytest
from llama_index.llms.bedrock_converse.utils import get_model_name
def test_get_model_name_translates_us():
assert (
get_model_name("us.meta.llama3-2-3b-instruct-v1:0")
== "meta.llama3-2-3b-instruct-v1:0"
)
def test_get_model_name_does_not_translate_cn():
assert (
get_model_name("cn.meta.llama3-2-3b-instruct-v1:0")
== "cn.meta.llama3-2-3b-instruct-v1:0"
)
def test_get_model_name_does_not_translate_unsupported():
assert get_model_name("cohere.command-r-plus-v1:0") == "cohere.command-r-plus-v1:0"
def test_get_model_name_throws_inference_profile_exception():
with pytest.raises(ValueError):
        get_model_name("us.cohere.command-r-plus-v1:0")
|
import unittest
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoSox, TorchaudioTestCase
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
@unittest.skip("deprecated")
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
def _smoke_test(self, format, compression, check_num_frames):
"""
        The purpose of this test is to verify that apply_codec does not exhibit
        abnormal behavior.
"""
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform, sample_rate, format, True, compression)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
import unittest
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoSox, TorchaudioTestCase
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
def _smoke_test(self, format, compression, check_num_frames):
"""
        The purpose of this test is to verify that apply_codec does not exhibit
        abnormal behavior.
"""
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform, sample_rate, format, True, compression)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
_base_ = './yolof_r50_c5_8x8_1x_coco.py'
# We implemented the iter-based config according to the source code.
# COCO dataset has 117266 images after filtering. We train with 8 GPUs
# and a batch size of 8 per GPU, so 22500 iterations are equivalent to
# 22500/(117266/(8x8))=12.3 epochs, 15000 iterations to 8.2 epochs and
# 20000 iterations to 10.9 epochs. Because the lr (0.12) is large,
# the iter-based and epoch-based settings differ by about 0.2 in
# the mAP evaluation value.
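# Worked out: 117266 / (8 x 8) ≈ 1832 iterations per epoch, so
# 22500 / 1832 ≈ 12.3, 15000 / 1832 ≈ 8.2 and 20000 / 1832 ≈ 10.9 epochs.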
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=22500,
val_interval=4500)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=22500,
by_epoch=False,
milestones=[15000, 20000],
gamma=0.1)
]
train_dataloader = dict(sampler=dict(type='InfiniteSampler'))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=2500))
log_processor = dict(by_epoch=False)
|
_base_ = './yolof_r50_c5_8x8_1x_coco.py'
# We implemented the iter-based config according to the source code.
# COCO dataset has 117266 images after filtering. We train with 8 GPUs
# and a batch size of 8 per GPU, so 22500 iterations are equivalent to
# 22500/(117266/(8x8))=12.3 epochs, 15000 iterations to 8.2 epochs and
# 20000 iterations to 10.9 epochs. Because the lr (0.12) is large,
# the iter-based and epoch-based settings differ by about 0.2 in
# the mAP evaluation value.
lr_config = dict(step=[15000, 20000])
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=22500)
checkpoint_config = dict(interval=2500)
evaluation = dict(interval=4500)
log_config = dict(interval=20)
|
import os
from typing import Dict
from hubble.executor.helper import parse_hub_uri
from hubble.executor.hubio import HubIO
from jina import (
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
__version__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
    except Exception:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'upload_files',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
    # None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
    except ValueError:
        return False
    return False
|
import os
from typing import Dict
from hubble.executor.helper import parse_hub_uri
from hubble.executor.hubio import HubIO
from jina import __default_executor__, __version__
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
    except Exception:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
Construct a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'upload_files',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
:return: boolean indicating whether is a valid uses to be used in K8s or docker compose
"""
if uses == __default_executor__ or uses.startswith('docker://'):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
    except ValueError:
        return False
    return False
|
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.utils import infer_torch_device
class ImageVisionLLMReader(BaseReader):
"""Image parser.
Caption image using Blip2 (a multimodal VisionLLM similar to GPT4).
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
prompt: str = "Question: describe what you see in this image. Answer:",
):
"""Init params."""
if parser_config is None:
try:
import sentencepiece # noqa
import torch
from PIL import Image # noqa
from transformers import Blip2ForConditionalGeneration, Blip2Processor
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers sentencepiece Pillow`"
)
self._torch = torch
self._torch_imported = True
device = infer_torch_device()
dtype = (
self._torch.float16
if self._torch.cuda.is_available()
else self._torch.float32
)
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
"Salesforce/blip2-opt-2.7b", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
# Try to import PyTorch in order to run inference efficiently.
self._import_torch()
self._parser_config = parser_config
self._keep_image = keep_image
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
# unconditional image captioning
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
if self._torch_imported:
# Gradients are not needed during inference. If PyTorch is
# installed, we can instruct it to not track the gradients.
# This reduces GPU memory usage and improves inference efficiency.
with self._torch.no_grad():
out = model.generate(**inputs)
else:
# Fallback to less efficient behavior if PyTorch is not installed.
out = model.generate(**inputs)
text_str = processor.decode(out[0], skip_special_tokens=True)
return [
ImageDocument(
text=text_str,
image=image_str,
image_path=str(file),
metadata=extra_info or {},
)
]
def _import_torch(self) -> None:
self._torch = None
try:
import torch
self._torch = torch
self._torch_imported = True
except ImportError:
self._torch_imported = False
|
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.utils import infer_torch_device
class ImageVisionLLMReader(BaseReader):
"""Image parser.
Caption image using Blip2 (a multimodal VisionLLM similar to GPT4).
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
prompt: str = "Question: describe what you see in this image. Answer:",
):
"""Init params."""
if parser_config is None:
try:
import sentencepiece # noqa
import torch
from PIL import Image # noqa
from transformers import Blip2ForConditionalGeneration, Blip2Processor
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers sentencepiece Pillow`"
)
device = infer_torch_device()
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
"Salesforce/blip2-opt-2.7b", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
self._parser_config = parser_config
self._keep_image = keep_image
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
# unconditional image captioning
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
out = model.generate(**inputs)
text_str = processor.decode(out[0], skip_special_tokens=True)
return [
ImageDocument(
text=text_str,
image=image_str,
image_path=str(file),
metadata=extra_info or {},
)
]
|
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py39
@pytest.fixture(
name="client",
params=[
"tutorial013",
"tutorial013_an",
pytest.param("tutorial013_an_py39", marks=needs_py39),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(
f"docs_src.query_params_str_validations.{request.param}"
)
client = TestClient(mod.app)
return client
def test_multi_query_values(client: TestClient):
url = "/items/?q=foo&q=bar"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": ["foo", "bar"]}
def test_query_no_values(client: TestClient):
url = "/items/"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": []}
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {
"title": "Q",
"type": "array",
"items": {},
"default": [],
},
"name": "q",
"in": "query",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
|
from fastapi.testclient import TestClient
from docs_src.query_params_str_validations.tutorial013 import app
client = TestClient(app)
def test_multi_query_values():
url = "/items/?q=foo&q=bar"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": ["foo", "bar"]}
def test_query_no_values():
url = "/items/"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": []}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {
"title": "Q",
"type": "array",
"items": {},
"default": [],
},
"name": "q",
"in": "query",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
|
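A minimal sketch of the app these two test files exercise, inferred from the OpenAPI schema asserted above (query parameter `q` as an untyped list defaulting to `[]`); the real docs_src tutorial module may differ in details.

from fastapi import FastAPI, Query

app = FastAPI()

@app.get("/items/")
async def read_items(q: list = Query(default=[])):
    # `q` collects every repeated ?q=... value; with none given it stays [].
    return {"q": q}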
import mimetypes
from typing import TYPE_CHECKING, Optional
from docarray.document.mixins._property import _PropertyMixin
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentContentType, ArrayType
from docarray import DocumentArray
_all_mime_types = set(mimetypes.types_map.values())
class PropertyMixin(_PropertyMixin):
def _clear_content(self):
self._data.content = None
self._data.text = None
self._data.tensor = None
self._data.blob = None
@property
def content(self) -> Optional['DocumentContentType']:
ct = self.content_type
if ct:
return getattr(self, ct)
@_PropertyMixin.text.setter
def text(self, value: str):
if value is not None:
self._clear_content()
self._data.text = value
@_PropertyMixin.blob.setter
def blob(self, value: bytes):
if value is not None:
self._clear_content()
self._data.blob = value
@_PropertyMixin.tensor.setter
def tensor(self, value: 'ArrayType'):
if value is not None:
self._clear_content()
self._data.tensor = value
@content.setter
def content(self, value: 'DocumentContentType'):
self._clear_content()
if isinstance(value, bytes):
self._data.blob = value
elif isinstance(value, str):
self._data.text = value
elif value is not None:
self._data.tensor = value
@_PropertyMixin.uri.setter
def uri(self, value: str):
if value:
mime_type = mimetypes.guess_type(value)[0]
if mime_type:
self._data.mime_type = mime_type
self._data.uri = value
@_PropertyMixin.mime_type.setter
def mime_type(self, value: str):
if value and value not in _all_mime_types:
# given but not a recognized MIME type; treat it as an extension and make a best guess
r = mimetypes.guess_type(f'*.{value}')[0]
value = r or value
self._data.mime_type = value
@_PropertyMixin.chunks.setter
def chunks(self, value: 'DocumentArray'):
from docarray.array.chunk import ChunkArray
if not isinstance(value, ChunkArray):
value = ChunkArray(value, reference_doc=self._data._reference_doc)
self._data.chunks = value
@_PropertyMixin.matches.setter
def matches(self, value: 'DocumentArray'):
from docarray.array.match import MatchArray
if not isinstance(value, MatchArray):
value = MatchArray(value, reference_doc=self._data._reference_doc)
self._data.matches = value
@property
def content_type(self) -> Optional[str]:
nf = self.non_empty_fields
if 'text' in nf:
return 'text'
elif 'tensor' in nf:
return 'tensor'
elif 'blob' in nf:
return 'blob'
|
import mimetypes
from typing import TYPE_CHECKING, Optional
from docarray.document.mixins._property import _PropertyMixin
if TYPE_CHECKING:
from docarray.typing import DocumentContentType, ArrayType
from docarray import DocumentArray
_all_mime_types = set(mimetypes.types_map.values())
class PropertyMixin(_PropertyMixin):
def _clear_content(self):
self._data.content = None
self._data.text = None
self._data.tensor = None
self._data.blob = None
@property
def content(self) -> Optional['DocumentContentType']:
ct = self.content_type
if ct:
return getattr(self, ct)
@_PropertyMixin.text.setter
def text(self, value: str):
if value is not None:
self._clear_content()
self._data.text = value
@_PropertyMixin.blob.setter
def blob(self, value: bytes):
if value is not None:
self._clear_content()
self._data.blob = value
@_PropertyMixin.tensor.setter
def tensor(self, value: 'ArrayType'):
if value is not None:
self._clear_content()
self._data.tensor = value
@content.setter
def content(self, value: 'DocumentContentType'):
self._clear_content()
if isinstance(value, bytes):
self._data.blob = value
elif isinstance(value, str):
self._data.text = value
elif value is not None:
self._data.tensor = value
@_PropertyMixin.uri.setter
def uri(self, value: str):
if value:
mime_type = mimetypes.guess_type(value)[0]
if mime_type:
self._data.mime_type = mime_type
self._data.uri = value
@_PropertyMixin.mime_type.setter
def mime_type(self, value: str):
if value and value not in _all_mime_types:
# given but not a recognized MIME type; treat it as an extension and make a best guess
r = mimetypes.guess_type(f'*.{value}')[0]
value = r or value
self._data.mime_type = value
@_PropertyMixin.chunks.setter
def chunks(self, value: 'DocumentArray'):
from docarray.array.chunk import ChunkArray
if not isinstance(value, ChunkArray):
value = ChunkArray(value, reference_doc=self._data._reference_doc)
self._data.chunks = value
@_PropertyMixin.matches.setter
def matches(self, value: 'DocumentArray'):
from docarray.array.match import MatchArray
if not isinstance(value, MatchArray):
value = MatchArray(value, reference_doc=self._data._reference_doc)
self._data.matches = value
@property
def content_type(self) -> Optional[str]:
nf = self.non_empty_fields
if 'text' in nf:
return 'text'
elif 'tensor' in nf:
return 'tensor'
elif 'blob' in nf:
return 'blob'
|
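The `mime_type` setter in the mixin above leans on a small `mimetypes` trick: a value that is not a known MIME type is treated as a bare file extension and re-guessed. A standalone sketch:

import mimetypes

# 'png' alone is not a MIME type, but '*.png' lets guess_type resolve it.
print(mimetypes.guess_type('*.png')[0])      # image/png
print(mimetypes.guess_type('*.jpg')[0])      # image/jpeg
# Unrecognized extensions yield None, so the setter keeps the raw value.
print(mimetypes.guess_type('*.notreal')[0])  # None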
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
]
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import (
hifigan_generator,
hifigan_generator_v1,
hifigan_generator_v2,
hifigan_generator_v3,
HiFiGANGenerator,
)
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"HiFiGANGenerator",
"hifigan_generator_v1",
"hifigan_generator_v2",
"hifigan_generator_v3",
"hifigan_generator",
]
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) on the STS benchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) on the STS benchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
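As a hypothetical follow-up to the script above, the locally saved model can be reloaded and used to score one sentence pair, mirroring what the evaluator measures; the path is a placeholder for the `final_output_dir` an actual run produces.

from sentence_transformers import SentenceTransformer, util

# Placeholder path: substitute the final output directory from your run.
model = SentenceTransformer(
    "output/training_stsbenchmark_distilbert-base-uncased-2024-01-01_00-00-00/final"
)
embeddings = model.encode(["A plane is taking off.", "An air plane is taking off."])
print(util.cos_sim(embeddings[0], embeddings[1]).item())  # cosine similarity score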
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import MilvusRetriever
from langchain_community.retrievers.milvus import MilvusRetreiver
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MilvusRetriever": "langchain_community.retrievers",
"MilvusRetreiver": "langchain_community.retrievers.milvus",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MilvusRetreiver",
"MilvusRetriever",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import MilvusRetriever
from langchain_community.retrievers.milvus import MilvusRetreiver
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MilvusRetriever": "langchain_community.retrievers",
"MilvusRetreiver": "langchain_community.retrievers.milvus",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MilvusRetriever",
"MilvusRetreiver",
]
|
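The module above relies on PEP 562 module-level `__getattr__` for lazy, deprecation-aware imports. A minimal standalone sketch of the same pattern, independent of langchain's `create_importer` helper:

import importlib
from typing import Any

_LOOKUP = {"MilvusRetriever": "langchain_community.retrievers"}  # name -> module path

def __getattr__(name: str) -> Any:
    # Resolve listed names lazily on first attribute access (PEP 562).
    if name in _LOOKUP:
        module = importlib.import_module(_LOOKUP[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")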
from __future__ import annotations
from collections.abc import Generator
import torch
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class MultipleNegativesRankingLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
num_negatives: int | None = 4,
scale: float = 10.0,
activation_fct: nn.Module | None = nn.Sigmoid(),
) -> None:
super().__init__()
self.model = model
self.num_negatives = num_negatives
self.scale = scale
self.activation_fct = activation_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def call_model_with_columns(self, anchors: list[str], candidates: list[str]) -> Tensor:
pairs = list(zip(anchors, candidates))
return self.call_model_with_pairs(pairs)
def call_model_with_pairs(self, pairs: list[list[str]]) -> Tensor:
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
return logits.squeeze(1)
def get_in_batch_negatives(
self, anchors: list[str], candidates: list[list[str]]
) -> Generator[list[str], None, None]:
batch_size = len(anchors)
num_candidates = len(candidates)
# Given N anchors, we want to select num_negatives negatives for each anchor
candidates_flattened = [candidate for sublist in candidates for candidate in sublist]
if self.num_negatives is not None:
# Create a mask for each anchor to each candidate index, where the matching positive
# and hard negatives are masked out. From the remaining options, we randomly select
# num_negatives indices.
mask = ~torch.eye(batch_size, dtype=torch.bool).repeat(1, num_candidates)
negative_indices = torch.multinomial(mask.float(), self.num_negatives)
else:
# If num_negatives is None, we select all negatives
negative_indices = torch.arange(len(candidates[0])).repeat(len(candidates), 1)
for negative_indices_row in negative_indices.T:
yield [candidates_flattened[negative_idx] for negative_idx in negative_indices_row]
def calculate_loss(self, logits: list[Tensor], batch_size: int) -> Tensor:
# (bsz, 1 + num_rand_negatives + num_hard_negatives)
logits = torch.cat(logits, dim=0).reshape(-1, batch_size).T
# Apply the post-processing on the logits
if self.activation_fct:
logits = self.activation_fct(logits)
if self.scale:
logits = logits * self.scale
# For each sample in the batch, the first label is the positive, the rest are negatives
labels = torch.zeros(batch_size, device=logits.device, dtype=torch.long)
loss = self.cross_entropy_loss(logits, labels)
return loss
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
anchors = inputs[0]
positives = inputs[1]
batch_size = len(anchors)
scores = [self.call_model_with_columns(anchors, positives)]
# In-batch negatives:
for negatives in self.get_in_batch_negatives(anchors, inputs[1:]):
scores.append(self.call_model_with_columns(anchors, negatives))
# Hard negatives:
for negatives in inputs[2:]:
scores.append(self.call_model_with_columns(anchors, negatives))
return self.calculate_loss(scores, batch_size)
def get_config_dict(self) -> dict[str, float]:
return {
"scale": self.scale,
"num_negatives": self.num_negatives,
"activation_fct": fullname(self.activation_fct),
}
|
from __future__ import annotations
from collections.abc import Generator
import torch
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class MultipleNegativesRankingLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
num_negatives: int | None = 4,
scale: float = 20.0,
activation_fct: nn.Module | None = nn.Tanh(),
) -> None:
super().__init__()
self.model = model
self.num_negatives = num_negatives
self.scale = scale
self.activation_fct = activation_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def call_model_with_columns(self, anchors: list[str], candidates: list[str]) -> Tensor:
pairs = list(zip(anchors, candidates))
return self.call_model_with_pairs(pairs)
def call_model_with_pairs(self, pairs: list[list[str]]) -> Tensor:
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
return logits.squeeze(1)
def get_in_batch_negatives(
self, anchors: list[str], candidates: list[list[str]]
) -> Generator[list[str], None, None]:
batch_size = len(anchors)
num_candidates = len(candidates)
# Given N anchors, we want to select num_negatives negatives for each anchor
candidates_flattened = [candidate for sublist in candidates for candidate in sublist]
if self.num_negatives is not None:
# Create a mask for each anchor to each candidate index, where the matching positive
# and hard negatives are masked out. From the remaining options, we randomly select
# num_negatives indices.
mask = ~torch.eye(batch_size, dtype=torch.bool).repeat(1, num_candidates)
negative_indices = torch.multinomial(mask.float(), self.num_negatives)
else:
# If num_negatives is None, we select all negatives
negative_indices = torch.arange(len(candidates[0])).repeat(len(candidates), 1)
for negative_indices_row in negative_indices.T:
yield [candidates_flattened[negative_idx] for negative_idx in negative_indices_row]
def calculate_loss(self, logits: list[Tensor], batch_size: int) -> Tensor:
# (bsz, 1 + num_rand_negatives + num_hard_negatives)
logits = torch.cat(logits, dim=0).reshape(-1, batch_size).T
# Apply the post-processing on the logits
if self.activation_fct:
logits = self.activation_fct(logits)
if self.scale:
logits = logits * self.scale
# For each sample in the batch, the first label is the positive, the rest are negatives
labels = torch.zeros(batch_size, device=logits.device, dtype=torch.long)
loss = self.cross_entropy_loss(logits, labels)
return loss
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
anchors = inputs[0]
positives = inputs[1]
batch_size = len(anchors)
scores = [self.call_model_with_columns(anchors, positives)]
# In-batch negatives:
for negatives in self.get_in_batch_negatives(anchors, inputs[1:]):
scores.append(self.call_model_with_columns(anchors, negatives))
# Hard negatives:
for negatives in inputs[2:]:
scores.append(self.call_model_with_columns(anchors, negatives))
return self.calculate_loss(scores, batch_size)
def get_config_dict(self) -> dict[str, float]:
return {
"scale": self.scale,
"num_negatives": self.num_negatives,
"activation_fct": fullname(self.activation_fct),
}
|
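The in-batch negative selection above hinges on one masking step: for each anchor, every candidate column belonging to that anchor (its positive and hard negatives) gets probability zero before sampling. A minimal numeric sketch of just the shapes, with no model involved:

import torch

batch_size, num_candidate_cols, num_negatives = 4, 2, 3
# Flattened candidate index = column * batch_size + row, so repeating the
# identity mask across columns hides each anchor's own candidates.
mask = ~torch.eye(batch_size, dtype=torch.bool).repeat(1, num_candidate_cols)
negative_indices = torch.multinomial(mask.float(), num_negatives)
print(negative_indices.shape)  # torch.Size([4, 3])
# No sampled index ever points back at the anchor's own candidates.
assert all(idx % batch_size != i for i, row in enumerate(negative_indices) for idx in row)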
import os
import shutil
from pathlib import Path
import pytest
import numpy as np
import PIL.Image as Image
from jina import DocumentArray, Document, Executor
from ...big_transfer import BigTransferEncoder
directory = os.path.dirname(os.path.realpath(__file__))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_path == 'pretrained'
assert ex.model_name == 'Imagenet21k/R50x1'
def test_initialization_and_model_download():
shutil.rmtree('pretrained', ignore_errors=True)
# This call will download the model
encoder = BigTransferEncoder()
assert encoder.model_path == 'pretrained'
assert encoder.model_name == 'Imagenet21k/R50x1'
assert not encoder.on_gpu
assert os.path.exists('pretrained')
assert os.path.exists(os.path.join('pretrained', 'saved_model.pb'))
# This call will use the downloaded model
_ = BigTransferEncoder()
shutil.rmtree('pretrained', ignore_errors=True)
with pytest.raises(AttributeError):
_ = BigTransferEncoder(model_name='model_not_exists')
def test_encoding():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder()
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_preprocessing():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder(target_dim=(256, 256, 3))
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_encoding_default_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder(default_traversal_paths=['c'])
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
def test_encoding_override_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder()
assert encoder.default_traversal_paths == ['r']
encoder.encode(DocumentArray([doc]), parameters={'traversal_paths': ['c']})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
|
import os
import shutil
from pathlib import Path
import pytest
import numpy as np
import PIL.Image as Image
from jina import DocumentArray, Document, Executor
from ...big_transfer import BigTransferEncoder
directory = os.path.dirname(os.path.realpath(__file__))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_path == 'pretrained'
assert ex.model_name == 'R50x1'
def test_initialization_and_model_download():
shutil.rmtree('pretrained', ignore_errors=True)
# This call will download the model
encoder = BigTransferEncoder()
assert encoder.model_path == 'pretrained'
assert encoder.model_name == 'R50x1'
assert not encoder.on_gpu
assert os.path.exists('pretrained')
assert os.path.exists(os.path.join('pretrained', 'saved_model.pb'))
# This call will use the downloaded model
_ = BigTransferEncoder()
shutil.rmtree('pretrained', ignore_errors=True)
with pytest.raises(AttributeError):
_ = BigTransferEncoder(model_name='model_not_exists')
def test_encoding():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder()
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_preprocessing():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder(target_dim=(256, 256, 3))
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_encoding_default_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder(default_traversal_paths=['c'])
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
def test_encoding_override_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder()
assert encoder.default_traversal_paths == ['r']
encoder.encode(DocumentArray([doc]), parameters={'traversal_paths': ['c']})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
|
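The tests above repeat the same preprocessing dance before encoding. A small helper capturing it, as a sketch only (it is not part of the executor under test):

import numpy as np
import PIL.Image as Image

def preprocess(blob: np.ndarray, size: int = 96) -> np.ndarray:
    # Resize an image blob and scale pixel values to [0, 1], as the tests do.
    img = Image.fromarray(blob.astype('uint8')).resize((size, size))
    return np.array(img).astype('float32') / 255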
from __future__ import annotations
import os
from typing import Literal, Optional, overload
import nomic # type: ignore[import]
from langchain_core.embeddings import Embeddings
from nomic import embed
class NomicEmbeddings(Embeddings):
"""NomicEmbeddings embedding model.
Example:
.. code-block:: python
from langchain_nomic import NomicEmbeddings
model = NomicEmbeddings()
"""
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: Literal["remote"] = ...,
): ...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: Literal["local", "dynamic"],
device: Optional[str] = ...,
): ...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: str,
device: Optional[str] = ...,
): ...
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = None,
dimensionality: Optional[int] = None,
inference_mode: str = "remote",
device: Optional[str] = None,
vision_model: Optional[str] = None,
):
"""Initialize NomicEmbeddings model.
Args:
model: model name
nomic_api_key: optionally, set the Nomic API key. Uses the ``NOMIC_API_KEY``
environment variable by default.
dimensionality: The embedding dimension, for use with Matryoshka-capable
models. Defaults to full-size.
inference_mode: How to generate embeddings. One of ``'remote'``, ``'local'``
(Embed4All), or ``'dynamic'`` (automatic). Defaults to ``'remote'``.
device: The device to use for local embeddings. Choices include
``'cpu'``, ``'gpu'``, ``'nvidia'``, ``'amd'``, or a specific device
name. See the docstring for ``GPT4All.__init__`` for more info.
Typically defaults to ``'cpu'``. Do not use on macOS.
vision_model: The vision model to use for image embeddings.
"""
_api_key = nomic_api_key or os.environ.get("NOMIC_API_KEY")
if _api_key:
nomic.login(_api_key)
self.model = model
self.dimensionality = dimensionality
self.inference_mode = inference_mode
self.device = device
self.vision_model = vision_model
def embed(self, texts: list[str], *, task_type: str) -> list[list[float]]:
"""Embed texts.
Args:
texts: list of texts to embed
task_type: the task type to use when embedding. One of ``'search_query'``,
``'search_document'``, ``'classification'``, ``'clustering'``
"""
output = embed.text(
texts=texts,
model=self.model,
task_type=task_type,
dimensionality=self.dimensionality,
inference_mode=self.inference_mode,
device=self.device,
)
return output["embeddings"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed search docs.
Args:
texts: list of texts to embed as documents
"""
return self.embed(
texts=texts,
task_type="search_document",
)
def embed_query(self, text: str) -> list[float]:
"""Embed query text.
Args:
text: query text
"""
return self.embed(
texts=[text],
task_type="search_query",
)[0]
def embed_image(self, uris: list[str]) -> list[list[float]]:
return embed.image(
images=uris,
model=self.vision_model,
)["embeddings"]
|
import os
from typing import Literal, Optional, overload
import nomic # type: ignore[import]
from langchain_core.embeddings import Embeddings
from nomic import embed
class NomicEmbeddings(Embeddings):
"""NomicEmbeddings embedding model.
Example:
.. code-block:: python
from langchain_nomic import NomicEmbeddings
model = NomicEmbeddings()
"""
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: Literal["remote"] = ...,
):
...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: Literal["local", "dynamic"],
device: Optional[str] = ...,
):
...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = ...,
dimensionality: Optional[int] = ...,
inference_mode: str,
device: Optional[str] = ...,
):
...
def __init__(
self,
*,
model: str,
nomic_api_key: Optional[str] = None,
dimensionality: Optional[int] = None,
inference_mode: str = "remote",
device: Optional[str] = None,
vision_model: Optional[str] = None,
):
"""Initialize NomicEmbeddings model.
Args:
model: model name
nomic_api_key: optionally, set the Nomic API key. Uses the ``NOMIC_API_KEY``
environment variable by default.
dimensionality: The embedding dimension, for use with Matryoshka-capable
models. Defaults to full-size.
inference_mode: How to generate embeddings. One of ``'remote'``, ``'local'``
(Embed4All), or ``'dynamic'`` (automatic). Defaults to ``'remote'``.
device: The device to use for local embeddings. Choices include
``'cpu'``, ``'gpu'``, ``'nvidia'``, ``'amd'``, or a specific device
name. See the docstring for ``GPT4All.__init__`` for more info.
Typically defaults to ``'cpu'``. Do not use on macOS.
"""
_api_key = nomic_api_key or os.environ.get("NOMIC_API_KEY")
if _api_key:
nomic.login(_api_key)
self.model = model
self.dimensionality = dimensionality
self.inference_mode = inference_mode
self.device = device
self.vision_model = vision_model
def embed(self, texts: list[str], *, task_type: str) -> list[list[float]]:
"""Embed texts.
Args:
texts: list of texts to embed
task_type: the task type to use when embedding. One of ``'search_query'``,
``'search_document'``, ``'classification'``, ``'clustering'``
"""
output = embed.text(
texts=texts,
model=self.model,
task_type=task_type,
dimensionality=self.dimensionality,
inference_mode=self.inference_mode,
device=self.device,
)
return output["embeddings"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed search docs.
Args:
texts: list of texts to embed as documents
"""
return self.embed(
texts=texts,
task_type="search_document",
)
def embed_query(self, text: str) -> list[float]:
"""Embed query text.
Args:
text: query text
"""
return self.embed(
texts=[text],
task_type="search_query",
)[0]
def embed_image(self, uris: list[str]) -> list[list[float]]:
return embed.image(
images=uris,
model=self.vision_model,
)["embeddings"]
|
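A hypothetical usage sketch for the embeddings class above; it assumes a valid NOMIC_API_KEY in the environment, and the model name is an assumption, not taken from this file.

from langchain_nomic import NomicEmbeddings

embeddings = NomicEmbeddings(model="nomic-embed-text-v1.5")  # model name assumed
doc_vectors = embeddings.embed_documents(["hello world", "goodbye world"])
query_vector = embeddings.embed_query("greeting")
print(len(doc_vectors), len(query_vector))  # number of docs, embedding dimension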
"""langchain-core version information and utilities."""
VERSION = "0.3.58"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.57"
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
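The @DETECTORS.register_module() decorator above stores the class in a registry keyed by its name, so configs can instantiate detectors from strings. A generic sketch of the pattern (not mmdet's actual Registry implementation):

class Registry:
    def __init__(self):
        self._modules = {}

    def register_module(self):
        def decorator(cls):
            self._modules[cls.__name__] = cls
            return cls
        return decorator

    def build(self, name, **kwargs):
        return self._modules[name](**kwargs)

DETECTORS = Registry()

@DETECTORS.register_module()
class TinyDetector:
    def __init__(self, backbone=None):
        self.backbone = backbone

model = DETECTORS.build("TinyDetector", backbone="darknet53")  # built by name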
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.data.execution import ExecutionEventType
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
user_id=graph_exec.user_id,
graph_id=graph_exec.graph_id,
graph_exec_id=graph_exec.graph_exec_id,
):
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.data.execution import ExecutionEventType
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
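The @thread_cached decorator above memoizes the zero-argument client factories once per thread. A sketch of such a decorator under that assumption; the real autogpt_libs implementation may differ (for example, by keying on arguments):

import threading
from functools import wraps

def thread_cached(func):
    # Per-thread memoizer for zero-argument factories (assumed semantics).
    local = threading.local()

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not hasattr(local, "value"):
            local.value = func(*args, **kwargs)
        return local.value

    return wrapper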
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`TextDoc.url`), a str (`TextDoc.text`),
and an AnyEmbedding (`TextDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(url="http://www.jina.ai/image.jpg"),
text_doc=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
# or
mmdoc.text_doc.bytes = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check applies, comparing every attribute, including `id`.
When compared against a str, only the `text` attribute is compared
against the given string.
.. code-block:: python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as a `str`.
.. code-block:: python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to check for as a substring of the `text` attribute
:return: A boolean indicating whether `item` is a substring of `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`TextDoc.url`), a str (`TextDoc.text`),
and an AnyEmbedding (`TextDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(url="http://www.jina.ai/image.jpg"),
text_doc=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
# or
mmdoc.text_doc.bytes = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check applies, comparing every attribute, including `id`.
When compared against a str, only the `text` attribute is compared
against the given string.
.. code-block:: python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
bytes: Optional[bytes] = None
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as a `str`.
.. code-block:: python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to check for as a substring of the `text` attribute
:return: A boolean indicating whether `item` is a substring of `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
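# A minimal usage sketch (hypothetical addition, not part of the original
# file): it exercises the str-like behaviors implemented above by `validate`,
# `__eq__` and `__contains__`.
if __name__ == '__main__':
    doc = TextDoc('hello world')  # positional str init via the custom __init__
    assert doc == 'hello world'  # comparison against a str only checks `text`
    assert 'hello' in doc  # substring check delegates to `text`
    assert 'hello' not in TextDoc()  # text is None, so nothing is contained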
|
import os
from pathlib import Path
from subprocess import check_call
repo_root = Path(__file__).absolute().parent.parent
third_party_path = repo_root / "third_party"
def _read_file(path: Path) -> str:
with path.open(encoding="utf-8") as f:
return f.read().strip()
def _checkout_by_tag(repo: str, tag: str) -> None:
check_call(
[
"git",
"clone",
"--depth",
"1",
"--branch",
tag,
repo,
],
cwd=third_party_path,
)
def read_nccl_pin() -> str:
nccl_file = "nccl-cu12.txt"
if os.getenv("DESIRED_CUDA", os.getenv("CUDA_VERSION", "")).startswith("11"):
nccl_file = "nccl-cu11.txt"
nccl_pin_path = repo_root / ".ci" / "docker" / "ci_commit_pins" / nccl_file
return _read_file(nccl_pin_path)
def checkout_nccl() -> None:
release_tag = read_nccl_pin()
print(f"-- Checkout nccl release tag: {release_tag}")
nccl_basedir = third_party_path / "nccl"
if not nccl_basedir.exists():
_checkout_by_tag("https://github.com/NVIDIA/nccl", release_tag)
def checkout_eigen() -> None:
eigen_tag = _read_file(third_party_path / "eigen_pin.txt")
print(f"-- Checkout Eigen release tag: {eigen_tag}")
eigen_basedir = third_party_path / "eigen"
if not eigen_basedir.exists():
_checkout_by_tag("https://gitlab.com/libeigen/eigen", eigen_tag)
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
# If no arguments are given, check out all optional dependencies
checkout_nccl()
checkout_eigen()
else:
# Otherwise call the chosen top-level function
globals()[sys.argv[1]]()
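# Example invocations (a sketch; the script path `scripts/checkout_deps.py`
# is an assumption, not taken from the file itself):
#   python scripts/checkout_deps.py                 # checks out nccl and eigen
#   python scripts/checkout_deps.py checkout_eigen  # checks out eigen only
# The single-argument form works because `globals()[sys.argv[1]]()` looks up
# the named top-level function of this module and calls it with no arguments.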
|
import os
from pathlib import Path
from subprocess import check_call
repo_root = Path(__file__).absolute().parent.parent
third_party_path = repo_root / "third_party"
def _read_file(path: Path) -> str:
with path.open(encoding="utf-8") as f:
return f.read().strip()
def _checkout_by_tag(repo: str, tag: str) -> None:
check_call(
[
"git",
"clone",
"--depth",
"1",
"--branch",
tag,
repo,
],
cwd=third_party_path,
)
def read_nccl_pin() -> str:
nccl_file = "nccl-cu12.txt"
if os.getenv("DESIRED_CUDA", os.getenv("CUDA_VERSION", "")).startswith("11"):
nccl_file = "nccl-cu11.txt"
nccl_pin_path = repo_root / ".ci" / "docker" / "ci_commit_pins" / nccl_file
return _read_file(nccl_pin_path)
def checkout_nccl() -> None:
release_tag = read_nccl_pin()
print(f"-- Checkout nccl release tag: {release_tag}")
nccl_basedir = third_party_path / "nccl"
if not nccl_basedir.exists():
_checkout_by_tag("https://github.com/NVIDIA/nccl", release_tag)
if __name__ == "__main__":
checkout_nccl()
|
import warnings
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.typing.bytes.video_bytes import VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
f'Video URL must have one of the following extensions: '
f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a NamedTuple of VideoNdArray, AudioNdArray
and NdArray.
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: VideoNdArray representing the frames of the video, AudioNdArray
representing the audio content, and NdArray of the key frame indices.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
You can load only the key frames (or video, audio respectively):
.. code-block:: python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
"""
from docarray.typing.bytes.video_bytes import VideoBytes
buffer = VideoBytes(self.load_bytes(**kwargs))
return buffer.load()
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = self.startswith('http')
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
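# A small validation sketch (hypothetical addition, not part of the original
# file): `validate` only checks that one of VIDEO_FILE_FORMATS occurs
# somewhere in the URL string, so a query-suffixed URL like `...mp4?raw=true`
# passes, while other extensions fail. The example URLs are illustrative only.
if __name__ == '__main__':
    from pydantic import parse_obj_as

    url = parse_obj_as(VideoUrl, 'https://example.com/mov_bbb.mp4?raw=true')
    assert isinstance(url, VideoUrl)
    try:
        parse_obj_as(VideoUrl, 'https://example.com/audio.wav')
    except ValueError:
        pass  # rejected: no 'mp4' substring in the URL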
|
import warnings
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.typing.bytes.video_bytes import VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
f'Video URL must have one of the following extensions: '
f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a NamedTuple of VideoNdArray, AudioNdArray
and NdArray.
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: VideoNdArray representing the frames of the video, AudioNdArray
representing the audio content, and NdArray of the key frame indices.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDocument):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
You can load only the key frames (or video, audio respectively):
.. code-block:: python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
"""
from docarray.typing.bytes.video_bytes import VideoBytes
buffer = VideoBytes(self.load_bytes(**kwargs))
return buffer.load()
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = self.startswith('http')
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
Literal["jina"],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="jina",
api_key=SecretStr("mock-jina-api-key"),
title="Mock Jina API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
def JinaCredentialsField() -> JinaCredentialsInput:
"""
Creates a Jina credentials input on a block.
"""
return CredentialsField(
provider="jina",
supported_credential_types={"api_key"},
description="The Jina integration can be used with an API Key.",
)
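# A hypothetical usage sketch (not part of the original file): a block's
# input schema would typically declare the credentials field like
#
#     class Input(BlockSchema):
#         credentials: JinaCredentialsInput = JinaCredentialsField()
#
# where `TEST_CREDENTIALS_INPUT` above stands in for the `credentials` input
# during tests and `TEST_CREDENTIALS` is the resolved secret.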
|
from typing import Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput
JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
Literal["jina"],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="jina",
api_key=SecretStr("mock-jina-api-key"),
title="Mock Jina API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
def JinaCredentialsField() -> JinaCredentialsInput:
"""
Creates a Jina credentials input on a block.
"""
return CredentialsField(
provider="jina",
supported_credential_types={"api_key"},
description="The Jina integration can be used with an API Key.",
)
|
import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray
from docarray.typing.tensor import NdArrayEmbedding
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 3, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 224)))
def test_np_embedding():
# correct shape
tensor = parse_obj_as(NdArrayEmbedding[128], np.zeros((128,)))
assert isinstance(tensor, NdArrayEmbedding)
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128], np.zeros((256,)))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128, 128], np.zeros((128, 128)))
def test_parametrized_subclass():
c1 = NdArray[128]
c2 = NdArray[128]
assert issubclass(c1, c2)
assert issubclass(c1, NdArray)
assert issubclass(c1, np.ndarray)
assert not issubclass(c1, NdArray[256])
def test_parametrized_instance():
t = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(t, NdArray[128])
assert isinstance(t, NdArray)
assert isinstance(t, np.ndarray)
assert not isinstance(t, NdArray[256])
def test_parametrized_equality():
t1 = parse_obj_as(NdArray[128], np.zeros(128))
t2 = parse_obj_as(NdArray[128], np.zeros(128))
t3 = parse_obj_as(NdArray[256], np.zeros(256))
assert (t1 == t2).all()
assert not t1 == t3
def test_parametrized_operations():
t1 = parse_obj_as(NdArray[128], np.zeros(128))
t2 = parse_obj_as(NdArray[128], np.zeros(128))
t_result = t1 + t2
assert isinstance(t_result, np.ndarray)
assert isinstance(t_result, NdArray)
assert isinstance(t_result, NdArray[128])
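# A supplementary sketch (an assumption, not in the original file):
# parametrized types should also accept plain nested lists, coercing them to
# the declared shape just as the bare NdArray does in test_from_list above.
def test_parametrized_from_list_sketch():
    t = parse_obj_as(NdArray[2, 2], [[0.0, 0.0], [0.0, 0.0]])
    assert isinstance(t, NdArray)
    assert t.shape == (2, 2)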
|
import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray
from docarray.typing.tensor import NdArrayEmbedding
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 3, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 224)))
def test_np_embedding():
# correct shape
tensor = parse_obj_as(NdArrayEmbedding[128], np.zeros((128,)))
assert isinstance(tensor, NdArrayEmbedding)
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128], np.zeros((256,)))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(NdArrayEmbedding[128, 128], np.zeros((128, 128)))
|
"""Azure Cognitive Services Tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
AzureCogsFormRecognizerTool,
AzureCogsImageAnalysisTool,
AzureCogsSpeech2TextTool,
AzureCogsText2SpeechTool,
AzureCogsTextAnalyticsHealthTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AzureCogsImageAnalysisTool": "langchain_community.tools",
"AzureCogsFormRecognizerTool": "langchain_community.tools",
"AzureCogsSpeech2TextTool": "langchain_community.tools",
"AzureCogsText2SpeechTool": "langchain_community.tools",
"AzureCogsTextAnalyticsHealthTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
]
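# A minimal sketch of the lazy lookup above (hypothetical usage; the module
# path is assumed from the file contents): accessing any name listed in
# DEPRECATED_LOOKUP goes through the module-level __getattr__, which
# re-exports it from langchain_community.tools and emits a deprecation
# warning, e.g.
#
#     from langchain.tools.azure_cognitive_services import (
#         AzureCogsImageAnalysisTool,
#     )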
|
"""Azure Cognitive Services Tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
AzureCogsFormRecognizerTool,
AzureCogsImageAnalysisTool,
AzureCogsSpeech2TextTool,
AzureCogsText2SpeechTool,
AzureCogsTextAnalyticsHealthTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AzureCogsImageAnalysisTool": "langchain_community.tools",
"AzureCogsFormRecognizerTool": "langchain_community.tools",
"AzureCogsSpeech2TextTool": "langchain_community.tools",
"AzureCogsText2SpeechTool": "langchain_community.tools",
"AzureCogsTextAnalyticsHealthTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AzureCogsImageAnalysisTool",
"AzureCogsFormRecognizerTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
]
|
import json
import logging
import os
from typing import Dict, Optional
import fsspec
from llama_index.core.storage.kvstore.types import (
DEFAULT_COLLECTION,
BaseInMemoryKVStore,
)
logger = logging.getLogger(__name__)
DATA_TYPE = Dict[str, Dict[str, dict]]
class SimpleKVStore(BaseInMemoryKVStore):
"""
Simple in-memory Key-Value store.
Args:
data (Optional[DATA_TYPE]): data to initialize the store with
"""
def __init__(
self,
data: Optional[DATA_TYPE] = None,
) -> None:
"""Init a SimpleKVStore."""
self._data: DATA_TYPE = data or {}
def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
"""Put a key-value pair into the store."""
if collection not in self._data:
self._data[collection] = {}
self._data[collection][key] = val.copy()
async def aput(
self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
) -> None:
"""Put a key-value pair into the store."""
self.put(key, val, collection)
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""Get a value from the store."""
collection_data = self._data.get(collection, None)
if not collection_data:
return None
if key not in collection_data:
return None
return collection_data[key].copy()
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""Get a value from the store."""
return self.get(key, collection)
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self._data.get(collection, {}).copy()
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self.get_all(collection)
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
try:
self._data[collection].pop(key)
return True
except KeyError:
return False
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
return self.delete(key, collection)
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
"""Persist the store."""
fs = fs or fsspec.filesystem("file")
dirpath = os.path.dirname(persist_path)
if not fs.exists(dirpath):
fs.makedirs(dirpath)
with fs.open(persist_path, "w") as f:
f.write(json.dumps(self._data))
@classmethod
def from_persist_path(
cls, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> "SimpleKVStore":
"""Load a SimpleKVStore from a persist path and filesystem."""
fs = fs or fsspec.filesystem("file")
logger.debug(f"Loading {__name__} from {persist_path}.")
with fs.open(persist_path, "rb") as f:
data = json.load(f)
return cls(data)
def to_dict(self) -> dict:
"""Save the store as dict."""
return self._data
@classmethod
def from_dict(cls, save_dict: dict) -> "SimpleKVStore":
"""Load a SimpleKVStore from dict."""
return cls(save_dict)
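# A small round-trip sketch (hypothetical usage, not part of the original
# file): `put`/`get` copy values defensively, and `persist`/
# `from_persist_path` serialize the whole store as one JSON document.
if __name__ == "__main__":
    store = SimpleKVStore()
    store.put("doc1", {"text": "hello"})
    assert store.get("doc1") == {"text": "hello"}
    store.persist("/tmp/kvstore/store.json")  # example path only
    loaded = SimpleKVStore.from_persist_path("/tmp/kvstore/store.json")
    assert loaded.get("doc1") == {"text": "hello"}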
|
import json
import logging
import os
from typing import Dict, Optional
import fsspec
from llama_index.core.storage.kvstore.types import (
DEFAULT_COLLECTION,
BaseInMemoryKVStore,
)
logger = logging.getLogger(__name__)
DATA_TYPE = Dict[str, Dict[str, dict]]
class SimpleKVStore(BaseInMemoryKVStore):
"""
Simple in-memory Key-Value store.
Args:
data (Optional[DATA_TYPE]): data to initialize the store with
"""
def __init__(
self,
data: Optional[DATA_TYPE] = None,
) -> None:
"""Init a SimpleKVStore."""
self._data: DATA_TYPE = data or {}
def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
"""Put a key-value pair into the store."""
if collection not in self._data:
self._data[collection] = {}
self._data[collection][key] = val.copy()
async def aput(
self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
) -> None:
"""Put a key-value pair into the store."""
self.put(key, val, collection)
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""Get a value from the store."""
collection_data = self._data.get(collection, None)
if not collection_data:
return None
if key not in collection_data:
return None
return collection_data[key].copy()
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""Get a value from the store."""
return self.get(key, collection)
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self._data.get(collection, {}).copy()
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self.get_all(collection)
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
try:
self._data[collection].pop(key)
return True
except KeyError:
return False
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
return self.delete(key, collection)
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
"""Persist the store."""
fs = fs or fsspec.filesystem("file")
dirpath = os.path.dirname(persist_path)
if not fs.exists(dirpath):
fs.makedirs(dirpath)
with fs.open(persist_path, "w") as f:
f.write(json.dumps(self._data))
@classmethod
def from_persist_path(
cls, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> "SimpleKVStore":
"""Load a SimpleKVStore from a persist path and filesystem."""
fs = fs or fsspec.filesystem("file")
logger.debug(f"Loading {__name__} from {persist_path}.")
with fs.open(persist_path, "rb") as f:
data = json.load(f)
return cls(data)
def to_dict(self) -> dict:
"""Save the store as dict."""
return self._data
@classmethod
def from_dict(cls, save_dict: dict) -> "SimpleKVStore":
"""Load a SimpleKVStore from dict."""
return cls(save_dict)
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation for defining get_compiler_ir."""
from typing import List, Optional
import warnings
from tensorflow.core.function import trace_type
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import random_ops
from tensorflow.python.util import nest
def maybe_get_device_name(device_name):
# TODO(cheshire): This is a hack to get the current "preferred" device,
# there is no current API to get it otherwise.
if device_name is None:
device_name = random_ops.random_normal([]).device
return device_name
# TODO(fmuham): Use trace_type._flatten here instead when available
def make_handledata_tensor_specs(resource_vars):
"""Convert tf.Variable list to its corresponding TensorSpec list."""
if not all(x.dtype is dtypes.resource for x in resource_vars):
raise RuntimeError("Resource_vars must be tf.resource list.")
inner_context = trace_type.InternalTracingContext()
trace_type_inputs = trace_type.from_value(
tuple(resource_vars), inner_context
).components
def to_resource_spec(traced_input):
try:
handle_data = traced_input.dtype._handle_data.shape_inference # pylint: disable=protected-access
shape_and_type = handle_data.shape_and_type[0]
spec = tensor_spec.TensorSpec(
shape=shape_and_type.shape, dtype=shape_and_type.dtype
)
return spec
except Exception as e:
raise ValueError(
"Fail to convert tf.Variable list to TensorSpec list. The error"
" is: %s" % e
) from e
return [to_resource_spec(trace_type) for trace_type in trace_type_inputs]
def from_concrete_function(
concrete_fn,
specialized_flat_specs: Optional[List[tensor_spec.TensorSpec]] = None,
):
"""Generate the Compiler Ir from tf concrete function with TensorSpec.
Args:
concrete_fn: returned by using get_concrete_function.
specialized_flat_specs: specialized flat tf.TensorSpecs for function args.
Returns:
A callable that generates the HLO text.
Raises:
ValueError: if concrete_fn is not "compilable" without concrete
inputs.
"""
context.ensure_initialized()
fn_name = concrete_fn.name
filtered_flat_specs = specialized_flat_specs or list(
nest.flatten(concrete_fn.structured_input_signature)
)
if not all(s.shape.is_fully_defined() for s in filtered_flat_specs):
raise ValueError(
f"Only support static input shape but got inputs = {concrete_fn.inputs}"
)
def compiler_ir_generator(stage="hlo", device_name=None, platform_name=None):
"""Gets the compiler IR bytes.
Args:
stage: The exported stage for the given function.
device_name: The name of the device, e.g.
"/job:localhost/replica:0/task:0/device:CPU:0", "/device:TPU:0", etc.
When this is used, an actual device is needed for getting the compiler IR.
platform_name: The name of the platform, e.g. "TPU". See the comment in
`get_compiler_ir` in `context.py`.
Returns:
The compiler IR bytes.
"""
if device_name is not None:
if platform_name is not None:
raise ValueError(
"device_name and platform_name cannot be provided at the same time."
)
warnings.warn("device_name is being deprecated. Use platform_name.")
device_name = maybe_get_device_name(device_name)
res_bytes = context.context().get_compiler_ir(
device_name=device_name,
platform_name=platform_name,
function_name=fn_name,
flat_args=filtered_flat_specs,
captured_inputs=concrete_fn.captured_inputs,
stage=stage,
)
if stage in (
# Ordered by IrExportStage enum order
"stablehlo_serialized",
"hlo_serialized",
"optimized_hlo_serialized",
"optimized_hlo_proto_serialized",
):
return res_bytes
else:
return res_bytes.decode("utf-8")
return compiler_ir_generator
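# A usage sketch (hypothetical, assuming TF 2.x with XLA available):
#
#     import tensorflow as tf
#
#     @tf.function(jit_compile=True)
#     def f(x):
#         return x * x + 1.0
#
#     concrete = f.get_concrete_function(tf.TensorSpec([2, 2], tf.float32))
#     generator = from_concrete_function(concrete)
#     print(generator(stage="hlo"))  # textual stages are returned decoded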
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implmentation for defining get_compiler_ir."""
from typing import List, Optional
import warnings
from tensorflow.core.function import trace_type
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import random_ops
from tensorflow.python.util import nest
def maybe_get_device_name(device_name):
# TODO(cheshire): This is a hack to get the current "preferred" device,
# there is no current API to get it otherwise.
if device_name is None:
device_name = random_ops.random_normal([]).device
return device_name
# TODO(fmuham): Use trace_type._flatten here instead when available
def make_handledata_tensor_specs(resource_vars):
"""Convert tf.Variable list to its corresponding TensorSpec list."""
if not all(x.dtype is dtypes.resource for x in resource_vars):
raise RuntimeError("Resource_vars must be tf.resource list.")
inner_context = trace_type.InternalTracingContext()
trace_type_inputs = trace_type.from_value(
tuple(resource_vars), inner_context
).components
def to_resource_spec(traced_input):
try:
handle_data = traced_input.dtype._handle_data.shape_inference # pylint: disable=protected-access
shape_and_type = handle_data.shape_and_type[0]
spec = tensor_spec.TensorSpec(
shape=shape_and_type.shape, dtype=shape_and_type.dtype
)
return spec
except Exception as e:
raise ValueError(
"Fail to convert tf.Variable list to TensorSpec list. The error"
" is: %s" % e
) from e
return [to_resource_spec(trace_type) for trace_type in trace_type_inputs]
def from_concrete_function(
concrete_fn,
specialized_flat_specs: Optional[List[tensor_spec.TensorSpec]] = None,
):
"""Generate the Compiler Ir from tf concrete function with TensorSpec.
Args:
concrete_fn: returned by using get_concrete_function.
specialized_flat_specs: specialized flat tf.TensorSpecs for function args.
Returns:
A callable that generates the HLO text.
Raises:
ValueError: if concrete_fn is not "compilable" without concrete
inputs.
"""
context.ensure_initialized()
fn_name = concrete_fn.name
filtered_flat_specs = specialized_flat_specs or list(
nest.flatten(concrete_fn.structured_input_signature)
)
if not all(s.shape.is_fully_defined() for s in filtered_flat_specs):
raise ValueError(
f"Only support static input shape but got inputs = {concrete_fn.inputs}"
)
def compiler_ir_generator(stage="hlo", device_name=None, platform_name=None):
"""Gets the compiler IR bytes.
Args:
stage: The exported stage for the given function.
device_name: The name of the device, e.g.
"/job:localhost/replica:0/task:0/device:CPU:0", "/device:TPU:0", etc.
When this is used, an actual device is needed for getting the compiler IR.
platform_name: The name of the platform, e.g. "TPU". See the comment in
`get_compiler_ir` in `context.py`.
Returns:
The compiler IR bytes.
"""
if device_name is not None:
if platform_name is not None:
raise ValueError(
"device_name and platform_name cannot be provided at the same time."
)
warnings.warn("device_name is being deprecated. Use platform_name.")
device_name = maybe_get_device_name(device_name)
res_bytes = context.context().get_compiler_ir(
device_name=device_name,
platform_name=platform_name,
function_name=fn_name,
flat_args=filtered_flat_specs,
captured_inputs=concrete_fn.captured_inputs,
stage=stage,
)
if stage in (
# Ordered by IrExportStage enum order
"stablehlo_serialized",
"hlo_serialized",
"optimized_hlo_serialized",
"optimized_hlo_proto_serialized",
):
return res_bytes
else:
return res_bytes.decode("utf-8")
return compiler_ir_generator
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage log processors
LOG_PROCESSORS = Registry('log_processor')
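# A registration sketch (hypothetical usage, not part of the original file):
# downstream code registers implementations into these registries with the
# `register_module` decorator and builds them from config dicts, e.g.
#
#     @HOOKS.register_module()
#     class MyHook:
#         ...
#
#     hook = HOOKS.build(dict(type='MyHook'))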
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage log processors
LOG_PROCESSOR = Registry('log_processor')
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block
def test_res2net_bottle2neck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')
with pytest.raises(AssertionError):
# Scale must be larger than 1
Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')
# Test Res2Net Bottle2neck structure
block = Bottle2neck(
64, 64, base_width=26, stride=2, scales=4, style='pytorch')
assert block.scales == 4
# Test Res2Net Bottle2neck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
Bottle2neck(
64,
64,
base_width=26,
scales=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
Bottle2neck(64, 64, dcn=dcn)
# Test Res2Net Bottle2neck forward
block = Bottle2neck(64, 16, base_width=26, scales=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_res2net_backbone():
with pytest.raises(KeyError):
# Res2Net depth should be in [50, 101, 152]
Res2Net(depth=18)
# Test Res2Net with scales 4, base_width 26
model = Res2Net(depth=50, scales=4, base_width=26)
for m in model.modules():
if is_block(m):
assert m.scales == 4
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block
def test_res2net_bottle2neck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')
with pytest.raises(AssertionError):
# Scale must be larger than 1
Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')
# Test Res2Net Bottle2neck structure
block = Bottle2neck(
64, 64, base_width=26, stride=2, scales=4, style='pytorch')
assert block.scales == 4
# Test Res2Net Bottle2neck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
Bottle2neck(
64,
64,
base_width=26,
scales=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
Bottle2neck(64, 64, dcn=dcn)
# Test Res2Net Bottle2neck forward
block = Bottle2neck(64, 16, base_width=26, scales=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_res2net_backbone():
with pytest.raises(KeyError):
# Res2Net depth should be in [50, 101, 152]
Res2Net(depth=18)
# Test Res2Net with scales 4, base_width 26
model = Res2Net(depth=50, scales=4, base_width=26)
for m in model.modules():
if is_block(m):
assert m.scales == 4
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
|
from typing import List
from backend.data.block import BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, SchemaField
from ._api import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
Filament,
Slant3DCredentialsField,
Slant3DCredentialsInput,
)
from .base import Slant3DBlockBase
class Slant3DFilamentBlock(Slant3DBlockBase):
"""Block for retrieving available filaments"""
class Input(BlockSchema):
credentials: Slant3DCredentialsInput = Slant3DCredentialsField()
class Output(BlockSchema):
filaments: List[Filament] = SchemaField(
description="List of available filaments"
)
error: str = SchemaField(description="Error message if request failed")
def __init__(self):
super().__init__(
id="7cc416f4-f305-4606-9b3b-452b8a81031c",
description="Get list of available filaments",
input_schema=self.Input,
output_schema=self.Output,
test_input={"credentials": TEST_CREDENTIALS_INPUT},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"filaments",
[
{
"filament": "PLA BLACK",
"hexColor": "000000",
"colorTag": "black",
"profile": "PLA",
},
{
"filament": "PLA WHITE",
"hexColor": "ffffff",
"colorTag": "white",
"profile": "PLA",
},
],
)
],
test_mock={
"_make_request": lambda *args, **kwargs: {
"filaments": [
{
"filament": "PLA BLACK",
"hexColor": "000000",
"colorTag": "black",
"profile": "PLA",
},
{
"filament": "PLA WHITE",
"hexColor": "ffffff",
"colorTag": "white",
"profile": "PLA",
},
]
}
},
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self._make_request(
"GET", "filament", credentials.api_key.get_secret_value()
)
yield "filaments", result["filaments"]
except Exception as e:
yield "error", str(e)
raise
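# A minimal driver sketch (hypothetical, not part of the original file):
# in unit tests the `test_mock` above replaces `_make_request`, so no HTTP
# call is made; a manual run would look like
#
#     async def demo(block: Slant3DFilamentBlock, creds: APIKeyCredentials):
#         async for name, value in block.run(
#             block.Input(credentials=TEST_CREDENTIALS_INPUT),
#             credentials=creds,
#         ):
#             print(name, value)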
|
from typing import List
from backend.data.block import BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, SchemaField
from ._api import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
Filament,
Slant3DCredentialsField,
Slant3DCredentialsInput,
)
from .base import Slant3DBlockBase
class Slant3DFilamentBlock(Slant3DBlockBase):
"""Block for retrieving available filaments"""
class Input(BlockSchema):
credentials: Slant3DCredentialsInput = Slant3DCredentialsField()
class Output(BlockSchema):
filaments: List[Filament] = SchemaField(
description="List of available filaments"
)
error: str = SchemaField(description="Error message if request failed")
def __init__(self):
super().__init__(
id="7cc416f4-f305-4606-9b3b-452b8a81031c",
description="Get list of available filaments",
input_schema=self.Input,
output_schema=self.Output,
test_input={"credentials": TEST_CREDENTIALS_INPUT},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"filaments",
[
{
"filament": "PLA BLACK",
"hexColor": "000000",
"colorTag": "black",
"profile": "PLA",
},
{
"filament": "PLA WHITE",
"hexColor": "ffffff",
"colorTag": "white",
"profile": "PLA",
},
],
)
],
test_mock={
"_make_request": lambda *args, **kwargs: {
"filaments": [
{
"filament": "PLA BLACK",
"hexColor": "000000",
"colorTag": "black",
"profile": "PLA",
},
{
"filament": "PLA WHITE",
"hexColor": "ffffff",
"colorTag": "white",
"profile": "PLA",
},
]
}
},
)
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = self._make_request(
"GET", "filament", credentials.api_key.get_secret_value()
)
yield "filaments", result["filaments"]
except Exception as e:
yield "error", str(e)
raise
|
import asyncio
from math import ceil
import pytest
from docarray import Document
from jina.clients.request.asyncio import request_generator
NUM_INPUT_DOCS = 30
REQUEST_SIZE = 10
@pytest.mark.asyncio
async def test_asyncio_req_generator():
async def input_function():
data = [Document() for _ in range(NUM_INPUT_DOCS)]
for doc in data:
yield doc
generator = request_generator('/', input_function(), request_size=REQUEST_SIZE)
i = 0
async for req in generator:
i += 1
assert len(req.docs) == REQUEST_SIZE
await asyncio.sleep(0.1)
assert i == ceil(NUM_INPUT_DOCS / REQUEST_SIZE)
@pytest.mark.asyncio
async def test_asyncio_req_generator_empty_inputs():
generator = request_generator('/', None)
i = 0
async for req in generator:
i += 1
assert len(req.docs) == 0
await asyncio.sleep(0.1)
assert i == 1
def test_asyncio_bad_input_generator():
# exception not handled
data = ['text' for _ in range(20)]
request_generator('/', data, request_size=10)
|
import asyncio
from math import ceil
import pytest
from jina import Document
from jina.clients.request.asyncio import request_generator
NUM_INPUT_DOCS = 30
REQUEST_SIZE = 10
@pytest.mark.asyncio
async def test_asyncio_req_generator():
async def input_function():
data = [Document() for _ in range(NUM_INPUT_DOCS)]
for doc in data:
yield doc
generator = request_generator('/', input_function(), request_size=REQUEST_SIZE)
i = 0
async for req in generator:
i += 1
assert len(req.docs) == REQUEST_SIZE
await asyncio.sleep(0.1)
assert i == ceil(NUM_INPUT_DOCS / REQUEST_SIZE)
@pytest.mark.asyncio
async def test_asyncio_req_generator_empty_inputs():
generator = request_generator('/', None)
i = 0
async for req in generator:
i += 1
assert len(req.docs) == 0
await asyncio.sleep(0.1)
assert i == 1
def test_asyncio_bad_input_generator():
# exception not handled
data = ['text' for _ in range(20)]
request_generator('/', data, request_size=10)
|
import numpy as np
from docarray import BaseDoc
from docarray.typing import AnyEmbedding
def test_set_embedding():
class MyDocument(BaseDoc):
embedding: AnyEmbedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
from docarray import BaseDocument
from docarray.typing import AnyEmbedding
def test_set_embedding():
class MyDocument(BaseDocument):
embedding: AnyEmbedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, 224, 224))).all()
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import SanaTransformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SanaTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = SanaTransformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
model_split_percents = [0.7, 0.7, 0.9]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = 32
width = 32
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 32, 32)
@property
def output_shape(self):
return (4, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 1,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"attention_head_dim": 4,
"num_attention_heads": 2,
"num_cross_attention_heads": 2,
"cross_attention_head_dim": 4,
"cross_attention_dim": 8,
"caption_channels": 8,
"sample_size": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"SanaTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
import torch
from diffusers import SanaTransformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SanaTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = SanaTransformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = 32
width = 32
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 32, 32)
@property
def output_shape(self):
return (4, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 1,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"attention_head_dim": 4,
"num_attention_heads": 2,
"num_cross_attention_heads": 2,
"cross_attention_head_dim": 4,
"cross_attention_dim": 8,
"caption_channels": 8,
"sample_size": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"SanaTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@pytest.mark.xfail(
condition=torch.device(torch_device).type == "cuda",
reason="Test currently fails.",
strict=True,
)
def test_cpu_offload(self):
return super().test_cpu_offload()
@pytest.mark.xfail(
condition=torch.device(torch_device).type == "cuda",
reason="Test currently fails.",
strict=True,
)
def test_disk_offload_with_safetensors(self):
return super().test_disk_offload_with_safetensors()
@pytest.mark.xfail(
condition=torch.device(torch_device).type == "cuda",
reason="Test currently fails.",
strict=True,
)
def test_disk_offload_without_safetensors(self):
return super().test_disk_offload_without_safetensors()
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward layer with an activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: PyTorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return "Dense({})".format(self.get_config_dict())
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
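# A minimal usage sketch (hypothetical addition, not part of the original
# file): Dense maps the pooled "sentence_embedding" through the linear layer
# plus activation, updating the features dict in place.
if __name__ == "__main__":
    layer = Dense(in_features=8, out_features=4)
    feats = {"sentence_embedding": torch.randn(2, 8)}
    out = layer(feats)
    assert out["sentence_embedding"].shape == (2, 4)
    assert layer.get_sentence_embedding_dimension() == 4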
|
import json
import os
from typing import Dict
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward layer with an activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: PyTorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return "Dense({})".format(self.get_config_dict())
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
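A minimal usage sketch for the Dense module above; the 768-to-256 projection sizes are arbitrary choices for illustration, not values from the source.

import torch
from torch import nn

# Hypothetical sizes: project pooled 768-dim sentence embeddings down to 256.
dense = Dense(in_features=768, out_features=256, bias=True, activation_function=nn.Tanh())
features = {"sentence_embedding": torch.randn(8, 768)}  # a batch of 8 pooled embeddings
features = dense(features)
assert features["sentence_embedding"].shape == (8, 256)
assert dense.get_sentence_embedding_dimension() == 256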
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='DetVisualizationHook'))
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO'
load_from = None
resume = False
|
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='DetVisualizationHook'))
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_level = 'INFO'
load_from = None
resume = False
# TODO: support auto scaling lr
|
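Configs like the two above are plain Python files; assuming MMEngine's config reader (the file path below is hypothetical), they can be loaded and inspected like so:

from mmengine.config import Config

# Hypothetical path; any of the runtime configs above would load the same way.
cfg = Config.fromfile('configs/_base_/default_runtime.py')
print(cfg.default_scope)         # 'mmdet'
print(cfg.default_hooks.logger)  # {'type': 'LoggerHook', 'interval': 50}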
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, though it seems to have no effect; you may need to export this manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.4'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    parallel plot generators against Ubuntu's default ulimit -n of 1024 or OS X El Capitan's 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, though it seems to have no effect; you may need to export this manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.3'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    parallel plot generators against Ubuntu's default ulimit -n of 1024 or OS X El Capitan's 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
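As the module above documents, JINA_MP_START_METHOD takes priority over the macOS-specific patch; a sketch of forcing 'spawn', which must happen before the import runs:

import os

os.environ['JINA_MP_START_METHOD'] = 'spawn'  # must be set before `import jina`
import jina  # the top-level module applies the start method at import time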
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
|
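Because of the @MODELS.register_module() decorator, the SOLO class can be looked up by name, which is how configs with type='SOLO' are resolved; a sketch assuming the module above has been imported so that registration ran:

from mmdet.registry import MODELS

solo_cls = MODELS.get('SOLO')  # resolves the registered name to the class
assert solo_cls is SOLO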
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .ddq_detr import DDQDETR
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .glip import GLIP
from .grid_rcnn import GridRCNN
from .grounding_dino import GroundingDINO
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR', 'GLIP',
'DDQDETR', 'GroundingDINO'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .ddq_detr import DDQDETR
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .glip import GLIP
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR', 'GLIP',
'DDQDETR'
]
|
import re
from typing import Any, Dict, Optional, Tuple
import pytest
from tests.mock_utils.mock_prompts import (
MOCK_REFINE_PROMPT,
MOCK_SCHEMA_EXTRACT_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
def _mock_output_parser(output: str) -> Optional[Dict[str, Any]]:
"""
Mock output parser.
Split via commas instead of newlines, in order to fit
the format of the mock test document (newlines create
separate text chunks in the testing code).
"""
tups = output.split(",")
fields = {}
for tup in tups:
if ":" in tup:
tokens = tup.split(":")
field = re.sub(r"\W+", "", tokens[0])
value = re.sub(r"\W+", "", tokens[1])
fields[field] = value
return fields
@pytest.fixture()
def struct_kwargs() -> Tuple[Dict, Dict]:
"""Index kwargs."""
# NOTE: QuestionAnswer and Refine templates aren't technically used
index_kwargs = {
"schema_extract_prompt": MOCK_SCHEMA_EXTRACT_PROMPT,
"output_parser": _mock_output_parser,
}
query_kwargs = {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
}
return index_kwargs, query_kwargs
|
import re
from typing import Any, Dict, Optional, Tuple
import pytest
from tests.mock_utils.mock_prompts import (
MOCK_REFINE_PROMPT,
MOCK_SCHEMA_EXTRACT_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
def _mock_output_parser(output: str) -> Optional[Dict[str, Any]]:
"""Mock output parser.
Split via commas instead of newlines, in order to fit
the format of the mock test document (newlines create
separate text chunks in the testing code).
"""
tups = output.split(",")
fields = {}
for tup in tups:
if ":" in tup:
tokens = tup.split(":")
field = re.sub(r"\W+", "", tokens[0])
value = re.sub(r"\W+", "", tokens[1])
fields[field] = value
return fields
@pytest.fixture()
def struct_kwargs() -> Tuple[Dict, Dict]:
"""Index kwargs."""
# NOTE: QuestionAnswer and Refine templates aren't technically used
index_kwargs = {
"schema_extract_prompt": MOCK_SCHEMA_EXTRACT_PROMPT,
"output_parser": _mock_output_parser,
}
query_kwargs = {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
}
return index_kwargs, query_kwargs
|
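To illustrate the parser above: each comma-separated "key: value" pair becomes a dict entry, with non-word characters stripped from both sides.

assert _mock_output_parser("name: Alice, age: 30") == {"name": "Alice", "age": "30"}
# Chunks without a colon are skipped; punctuation is removed by the \W+ substitutions.
assert _mock_output_parser("noise, title: Dr.") == {"title": "Dr"}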
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "uint8", "ubinary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using FAISS
rescore_multiplier = 4
results, search_time = semantic_search_faiss(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "uint8", "ubinary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using FAISS
rescore_multiplier = 4
results, search_time = semantic_search_faiss(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
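On the precision trade-off in the script above: float32 stores 4 bytes per dimension, uint8 one byte, and ubinary packs 8 dimensions per byte, while rescoring over-fetches top_k * rescore_multiplier candidates and re-ranks them against the full-precision query embeddings. A rough memory estimate, assuming 1024-dimensional embeddings (the dimension is taken from the mxbai-embed-large-v1 model card and is an assumption here):

# Approximate corpus memory for 100k embeddings of dimension 1024 (assumed).
n, dim = 100_000, 1024
print(f"float32: {n * dim * 4 / 1e6:.0f} MB")  # ~410 MB
print(f"uint8:   {n * dim * 1 / 1e6:.0f} MB")  # ~102 MB, 4x smaller
print(f"ubinary: {n * dim / 8 / 1e6:.0f} MB")  # ~13 MB, 32x smaller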
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.document.io.json import orjson_dumps
from docarray.document.mixins import ProtoMixin
from docarray.typing import ID
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested python class defined in the schema. Could be useful for
        reconstruction of a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field
from pydantic import parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.document.io.json import orjson_dumps
from docarray.document.mixins import ProtoMixin
from docarray.typing import ID
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested python class defined in the schema. Could be useful for
        reconstruction of a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
|
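A sketch of how the nested-class lookup above is used, assuming pydantic v1 field internals (type_/outer_type_ on ModelField) as in the code; Inner and Outer are hypothetical classes for illustration:

from docarray.typing import ID

class Inner(BaseDocument):
    pass

class Outer(BaseDocument):
    inner: Inner

# The class stored in the schema is recovered by field name, which is what
# serialization/deserialization relies on.
assert Outer._get_nested_document_class('inner') is Inner
assert Outer._get_nested_document_class('id') is ID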
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
from mmcv.transforms import Compose
from mmdet.registry import TRANSFORMS
@TRANSFORMS.register_module()
class MultiScaleFlipAug:
"""Test-time augmentation with multiple scales and flipping.
    An example configuration is as follows:
.. code-block::
img_scale=[(1333, 400), (1333, 800)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
    After MultiScaleFlipAug with the above configuration, the results are wrapped
    into lists of the same length as follows:
.. code-block::
dict(
img=[...],
img_shape=[...],
scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
flip=[False, True, False, True]
...
)
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple] | None): Image scales for resizing.
        scale_factor (float | list[float] | None): Scale factors for resizing.
        flip (bool): Whether to apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal", "vertical" and "diagonal". If
flip_direction is a list, multiple flip augmentations will be
applied. It has no effect when flip == False. Default:
"horizontal".
"""
def __init__(self,
transforms,
img_scale=None,
scale_factor=None,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
        assert (img_scale is None) ^ (scale_factor is None), (
            'Exactly one of img_scale and scale_factor must be set')
if img_scale is not None:
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
self.scale_key = 'scale'
assert mmcv.is_list_of(self.img_scale, tuple)
else:
self.img_scale = scale_factor if isinstance(
scale_factor, list) else [scale_factor]
self.scale_key = 'scale_factor'
self.flip = flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip
and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to apply test time augment transforms on results.
Args:
results (dict): Result dict contains the data to transform.
Returns:
dict[str: list]: The augmented data, where each value is wrapped
into a list.
"""
aug_data = []
flip_args = [(False, None)]
if self.flip:
flip_args += [(True, direction)
for direction in self.flip_direction]
for scale in self.img_scale:
for flip, direction in flip_args:
_results = results.copy()
_results[self.scale_key] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
from mmdet.registry import TRANSFORMS
from .compose import Compose
@TRANSFORMS.register_module()
class MultiScaleFlipAug:
"""Test-time augmentation with multiple scales and flipping.
    An example configuration is as follows:
.. code-block::
img_scale=[(1333, 400), (1333, 800)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
    After MultiScaleFlipAug with the above configuration, the results are wrapped
    into lists of the same length as follows:
.. code-block::
dict(
img=[...],
img_shape=[...],
scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
flip=[False, True, False, True]
...
)
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple] | None): Image scales for resizing.
        scale_factor (float | list[float] | None): Scale factors for resizing.
        flip (bool): Whether to apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal", "vertical" and "diagonal". If
flip_direction is a list, multiple flip augmentations will be
applied. It has no effect when flip == False. Default:
"horizontal".
"""
def __init__(self,
transforms,
img_scale=None,
scale_factor=None,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
        assert (img_scale is None) ^ (scale_factor is None), (
            'Exactly one of img_scale and scale_factor must be set')
if img_scale is not None:
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
self.scale_key = 'scale'
assert mmcv.is_list_of(self.img_scale, tuple)
else:
self.img_scale = scale_factor if isinstance(
scale_factor, list) else [scale_factor]
self.scale_key = 'scale_factor'
self.flip = flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip
and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to apply test time augment transforms on results.
Args:
results (dict): Result dict contains the data to transform.
Returns:
dict[str: list]: The augmented data, where each value is wrapped
into a list.
"""
aug_data = []
flip_args = [(False, None)]
if self.flip:
flip_args += [(True, direction)
for direction in self.flip_direction]
for scale in self.img_scale:
for flip, direction in flip_args:
_results = results.copy()
_results[self.scale_key] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
|
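The number of augmented views MultiScaleFlipAug produces is len(img_scale) multiplied by one plus the number of flip directions when flip is enabled; with the docstring's example of two scales and a horizontal flip, each image yields four copies:

# View count for the docstring's example configuration.
img_scale = [(1333, 400), (1333, 800)]
flip, flip_direction = True, ['horizontal']
num_views = len(img_scale) * (1 + (len(flip_direction) if flip else 0))
assert num_views == 4  # matches scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]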
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .registry import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .dataset import *
from .data import *
from .fileio import *
from .registry import *
from .utils import *
|
from urllib.parse import quote
from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
query: str = SchemaField(description="The search query to search the web for")
class Output(BlockSchema):
results: str = SchemaField(
description="The search results including content from top 5 URLs"
)
error: str = SchemaField(description="Error message if the search fails")
def __init__(self):
super().__init__(
id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
output_schema=SearchTheWebBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"query": "Artificial Intelligence",
},
test_credentials=TEST_CREDENTIALS,
test_output=("results", "search content"),
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)
async def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
encoded_query = quote(input_data.query)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
results = await self.get_request(jina_search_url, headers=headers, json=False)
# Output the search results
yield "results", results
class ExtractWebsiteContentBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
url: str = SchemaField(description="The URL to scrape the content from")
raw_content: bool = SchemaField(
default=False,
title="Raw Content",
description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
advanced=True,
)
class Output(BlockSchema):
content: str = SchemaField(description="The scraped content from the given URL")
error: str = SchemaField(
description="Error message if the content cannot be retrieved"
)
def __init__(self):
super().__init__(
id="436c3984-57fd-4b85-8e9a-459b356883bd",
description="This block scrapes the content from the given web URL.",
categories={BlockCategory.SEARCH},
input_schema=ExtractWebsiteContentBlock.Input,
output_schema=ExtractWebsiteContentBlock.Output,
test_input={
"url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=("content", "scraped content"),
test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
)
async def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
if input_data.raw_content:
url = input_data.url
headers = {}
else:
url = f"https://r.jina.ai/{input_data.url}"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
content = await self.get_request(url, json=False, headers=headers)
yield "content", content
|
from urllib.parse import quote
from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
query: str = SchemaField(description="The search query to search the web for")
class Output(BlockSchema):
results: str = SchemaField(
description="The search results including content from top 5 URLs"
)
error: str = SchemaField(description="Error message if the search fails")
def __init__(self):
super().__init__(
id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
output_schema=SearchTheWebBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"query": "Artificial Intelligence",
},
test_credentials=TEST_CREDENTIALS,
test_output=("results", "search content"),
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
encoded_query = quote(input_data.query)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
results = self.get_request(jina_search_url, headers=headers, json=False)
# Output the search results
yield "results", results
class ExtractWebsiteContentBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
url: str = SchemaField(description="The URL to scrape the content from")
raw_content: bool = SchemaField(
default=False,
title="Raw Content",
description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
advanced=True,
)
class Output(BlockSchema):
content: str = SchemaField(description="The scraped content from the given URL")
error: str = SchemaField(
description="Error message if the content cannot be retrieved"
)
def __init__(self):
super().__init__(
id="436c3984-57fd-4b85-8e9a-459b356883bd",
description="This block scrapes the content from the given web URL.",
categories={BlockCategory.SEARCH},
input_schema=ExtractWebsiteContentBlock.Input,
output_schema=ExtractWebsiteContentBlock.Output,
test_input={
"url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=("content", "scraped content"),
test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
if input_data.raw_content:
url = input_data.url
headers = {}
else:
url = f"https://r.jina.ai/{input_data.url}"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
content = self.get_request(url, json=False, headers=headers)
yield "content", content
|
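The two versions above differ only in sync vs. async get_request; both build their request URLs the same way, sketched here:

from urllib.parse import quote

query = "large language models?"
search_url = f"https://s.jina.ai/{quote(query)}"  # Jina Search endpoint
page = "https://en.wikipedia.org/wiki/Artificial_intelligence"
reader_url = f"https://r.jina.ai/{page}"          # Jina Reader endpoint
print(search_url)  # https://s.jina.ai/large%20language%20models%3F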
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)))
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_head
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = build_head(segm_head)
self.mask_head = build_head(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
feat = self.extract_feat(img)
bbox_outs = self.bbox_head(feat)
prototypes = self.mask_head.forward_dummy(feat[0])
return (bbox_outs, prototypes)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_head
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = build_head(segm_head)
self.mask_head = build_head(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
raise NotImplementedError
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
|
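The finite-loss assertion in forward_train above guards against silent divergence during training; the same pattern, standalone:

import torch

losses = {'loss_cls': [torch.tensor(0.5), torch.tensor(1.2)]}
for loss_name in losses:
    assert torch.isfinite(torch.stack(losses[loss_name])).all().item(), \
        '{} becomes infinite or NaN!'.format(loss_name)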
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from ..builder import NECKS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@NECKS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels
num_residual_blocks (int): The number of residual blocks.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = [2, 4, 6, 8]
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
|
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from ..builder import NECKS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@NECKS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels
num_residual_blocks (int): The number of residual blocks.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = [2, 4, 6, 8]
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
|
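A shape sketch for DilatedEncoder above; the 2048-to-512 channel sizes mirror a ResNet-50 YOLOF setting and are assumptions here. The lateral 1x1 conv changes channels while the fpn conv and dilated bottlenecks preserve both channels and spatial size:

import torch

neck = DilatedEncoder(in_channels=2048, out_channels=512,
                      block_mid_channels=128, num_residual_blocks=4)
neck.init_weights()
feat = [torch.randn(1, 2048, 25, 25)]  # forward() reads feature[-1]
out, = neck(feat)                      # returns a one-element tuple
assert out.shape == (1, 512, 25, 25)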
from typing import Annotated, Optional
import typer
from langchain_cli._version import __version__
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.namespaces.migrate import main as migrate_namespace
from langchain_cli.utils.packages import get_langserve_export, get_package_root
app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
template_namespace.package_cli, name="template", help=template_namespace.__doc__
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(
integration_namespace.integration_cli,
name="integration",
help=integration_namespace.__doc__,
)
app.command(
name="migrate",
context_settings={
# Let Grit handle the arguments
"allow_extra_args": True,
"ignore_unknown_options": True,
},
)(
migrate_namespace.migrate,
)
def version_callback(show_version: bool) -> None:
if show_version:
typer.echo(f"langchain-cli {__version__}")
raise typer.Exit
@app.callback()
def main(
version: bool = typer.Option(
False,
"--version",
"-v",
help="Print the current CLI version.",
callback=version_callback,
is_eager=True,
),
) -> None:
pass
@app.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
) -> None:
"""Start the LangServe app, whether it's a template or an app."""
# see if is a template
try:
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
get_langserve_export(pyproject)
except KeyError:
# not a template
app_namespace.serve(port=port, host=host)
else:
# is a template
template_namespace.serve(port=port, host=host)
if __name__ == "__main__":
app()
|
from typing import Annotated, Optional
import typer
from langchain_cli._version import __version__
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.namespaces.migrate import main as migrate_namespace
from langchain_cli.utils.packages import get_langserve_export, get_package_root
app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
template_namespace.package_cli, name="template", help=template_namespace.__doc__
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(
integration_namespace.integration_cli,
name="integration",
help=integration_namespace.__doc__,
)
app.command(
name="migrate",
context_settings={
# Let Grit handle the arguments
"allow_extra_args": True,
"ignore_unknown_options": True,
},
)(
migrate_namespace.migrate,
)
def version_callback(show_version: bool) -> None:
if show_version:
typer.echo(f"langchain-cli {__version__}")
raise typer.Exit()
@app.callback()
def main(
version: bool = typer.Option(
False,
"--version",
"-v",
help="Print the current CLI version.",
callback=version_callback,
is_eager=True,
),
):
pass
@app.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
) -> None:
"""
Start the LangServe app, whether it's a template or an app.
"""
# see if is a template
try:
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
get_langserve_export(pyproject)
except KeyError:
# not a template
app_namespace.serve(port=port, host=host)
else:
# is a template
template_namespace.serve(port=port, host=host)
if __name__ == "__main__":
app()
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from PIL import Image as PILImage
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
    Bytes that store an image and can be loaded into an image tensor.
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load_pil(
self,
) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
return PILImage.open(BytesIO(self))
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> np.ndarray:
"""
Load the image from the bytes into a numpy.ndarray image tensor
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: np.ndarray representing the image as RGB values
"""
raw_img = self.load_pil()
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
return self._move_channel_axis(tensor, axis_layout=axis_layout)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
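# Hedged sketch (not part of the original class): `_move_channel_axis` permutes
# the default (H, W, C) layout into the requested one.
if __name__ == '__main__':
    dummy = np.zeros((100, 200, 3))
    chw = ImageBytes._move_channel_axis(dummy, axis_layout=('C', 'H', 'W'))
    assert chw.shape == (3, 100, 200)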
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
    Bytes that store an image and can be loaded into an image tensor.
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> np.ndarray:
"""
Load the image from the bytes into a numpy.ndarray image tensor
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: np.ndarray representing the image as RGB values
"""
if TYPE_CHECKING:
from PIL import Image as PILImage
else:
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
raw_img = PILImage.open(BytesIO(self))
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
return self._move_channel_axis(tensor, axis_layout=axis_layout)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
|
import logging
import sentry_sdk
from pydantic import SecretStr
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={"enable_logs": True},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
def sentry_capture_error(error: Exception):
sentry_sdk.capture_exception(error)
sentry_sdk.flush()
async def discord_send_alert(content: str):
from backend.blocks.discord import SendDiscordMessageBlock
from backend.data.model import APIKeyCredentials, CredentialsMetaInput, ProviderName
from backend.util.settings import Settings
settings = Settings()
creds = APIKeyCredentials(
provider="discord",
api_key=SecretStr(settings.secrets.discord_bot_token),
title="Provide Discord Bot Token for the platform alert",
expires_at=None,
)
return await SendDiscordMessageBlock().run_once(
SendDiscordMessageBlock.Input(
credentials=CredentialsMetaInput(
id=creds.id,
title=creds.title,
type=creds.type,
provider=ProviderName.DISCORD,
),
message_content=content,
channel_name=settings.config.platform_alert_discord_channel,
),
"status",
credentials=creds,
)
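# Hedged usage sketch (assumed workflow, not part of the original module):
# initialize Sentry once at startup, then capture failures and alert Discord.
async def _example_alert_flow():
    sentry_init()
    try:
        raise RuntimeError("example failure")
    except RuntimeError as err:
        sentry_capture_error(err)
        await discord_send_alert("Example failure captured")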
|
import asyncio
import logging
import sentry_sdk
from pydantic import SecretStr
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={"enable_logs": True},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
def sentry_capture_error(error: Exception):
sentry_sdk.capture_exception(error)
sentry_sdk.flush()
def discord_send_alert(content: str):
from backend.blocks.discord import SendDiscordMessageBlock
from backend.data.model import APIKeyCredentials, CredentialsMetaInput, ProviderName
from backend.util.settings import Settings
settings = Settings()
creds = APIKeyCredentials(
provider="discord",
api_key=SecretStr(settings.secrets.discord_bot_token),
title="Provide Discord Bot Token for the platform alert",
expires_at=None,
)
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return SendDiscordMessageBlock().run_once(
SendDiscordMessageBlock.Input(
credentials=CredentialsMetaInput(
id=creds.id,
title=creds.title,
type=creds.type,
provider=ProviderName.DISCORD,
),
message_content=content,
channel_name=settings.config.platform_alert_discord_channel,
),
"status",
credentials=creds,
)
|
__version__ = '0.21.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
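# Opt-in: setting the DA_RICH_HANDLER environment variable (to any value)
# installs rich's prettier traceback handler.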
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.20.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
import math
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Args:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
stddev = math.sqrt(self.rate / (1.0 - self.rate))
return inputs * backend.random.normal(
shape=ops.shape(inputs),
mean=1.0,
stddev=stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
}
return {**base_config, **config}
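# Hedged usage sketch (not part of the original layer): noise is applied only
# when training=True; at inference the layer is the identity.
if __name__ == "__main__":
    import numpy as np
    layer = GaussianDropout(rate=0.2, seed=0)
    x = np.ones((2, 4), dtype="float32")
    y_eval = layer(x, training=False)  # returns the input unchanged
    y_train = layer(x, training=True)  # multiplicative noise, stddev sqrt(0.2 / 0.8) = 0.5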
|
import math
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Args:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
def call(self, inputs, training=False):
if training and self.rate > 0:
stddev = math.sqrt(self.rate / (1.0 - self.rate))
return inputs * backend.random.normal(
shape=ops.shape(inputs),
mean=1.0,
stddev=stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
}
return {**base_config, **config}
|
import os
import urllib
import numpy as np
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.proto
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_bytes()
assert isinstance(_bytes, bytes)
img = Image.frombytes(mode='1', size=(224, 224), data=_bytes)
assert isinstance(img, Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import os
import urllib
import numpy as np
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_bytes()
assert isinstance(_bytes, bytes)
img = Image.frombytes(mode='1', size=(224, 224), data=_bytes)
assert isinstance(img, Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
from docarray.base_doc.any_doc import AnyDoc
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
__all__ = ['AnyDoc', 'BaseDoc', 'BaseNode']
def __getattr__(name: str):
if name == 'DocResponse':
import_library('fastapi', raise_error=True)
from docarray.base_doc.doc_response import DocResponse
if name not in __all__:
__all__.append(name)
return DocResponse
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from docarray.base_doc.any_doc import AnyDoc
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.doc import BaseDoc
from docarray.base_doc.doc_response import DocResponse
__all__ = ['AnyDoc', 'BaseDoc', 'BaseNode', 'DocResponse']
|
import collections.abc
import dataclasses
from typing import Optional, Sequence
import pytest
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import tv_tensors
from transforms_v2_legacy_utils import combinations_grid, DEFAULT_EXTRA_DIMS, from_loader, from_loaders, TensorLoader
@dataclasses.dataclass
class LabelLoader(TensorLoader):
categories: Optional[Sequence[str]]
def _parse_categories(categories):
if categories is None:
num_categories = int(torch.randint(1, 11, ()))
elif isinstance(categories, int):
num_categories = categories
categories = [f"category{idx}" for idx in range(num_categories)]
elif isinstance(categories, collections.abc.Sequence) and all(isinstance(category, str) for category in categories):
categories = list(categories)
num_categories = len(categories)
else:
raise pytest.UsageError(
f"`categories` can either be `None` (default), an integer, or a sequence of strings, "
f"but got '{categories}' instead."
)
return categories, num_categories
def make_label_loader(*, extra_dims=(), categories=None, dtype=torch.int64):
categories, num_categories = _parse_categories(categories)
def fn(shape, dtype, device):
# The idiom `make_tensor(..., dtype=torch.int64).to(dtype)` is intentional to only get integer values,
# regardless of the requested dtype, e.g. 0 or 0.0 rather than 0 or 0.123
data = torch.testing.make_tensor(shape, low=0, high=num_categories, dtype=torch.int64, device=device).to(dtype)
return tv_tensors.Label(data, categories=categories)
return LabelLoader(fn, shape=extra_dims, dtype=dtype, categories=categories)
make_label = from_loader(make_label_loader)
@dataclasses.dataclass
class OneHotLabelLoader(TensorLoader):
categories: Optional[Sequence[str]]
def make_one_hot_label_loader(*, categories=None, extra_dims=(), dtype=torch.int64):
categories, num_categories = _parse_categories(categories)
def fn(shape, dtype, device):
if num_categories == 0:
data = torch.empty(shape, dtype=dtype, device=device)
else:
# The idiom `make_label_loader(..., dtype=torch.int64); ...; one_hot(...).to(dtype)` is intentional
# since `one_hot` only supports int64
label = make_label_loader(extra_dims=extra_dims, categories=num_categories, dtype=torch.int64).load(device)
data = one_hot(label, num_classes=num_categories).to(dtype)
return tv_tensors.OneHotLabel(data, categories=categories)
return OneHotLabelLoader(fn, shape=(*extra_dims, num_categories), dtype=dtype, categories=categories)
def make_one_hot_label_loaders(
*,
categories=(1, 0, None),
extra_dims=DEFAULT_EXTRA_DIMS,
dtypes=(torch.int64, torch.float32),
):
for params in combinations_grid(categories=categories, extra_dims=extra_dims, dtype=dtypes):
yield make_one_hot_label_loader(**params)
make_one_hot_labels = from_loaders(make_one_hot_label_loaders)
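# Hedged sketch (assumes the surrounding test utilities are importable):
# materialize a single label loader on CPU.
if __name__ == "__main__":
    loader = make_label_loader(categories=5)
    label = loader.load("cpu")  # tv_tensors.Label with integer values in [0, 5)
    print(label.shape, label.dtype)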
|
import collections.abc
import dataclasses
from typing import Optional, Sequence
import pytest
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import datapoints
from transforms_v2_legacy_utils import combinations_grid, DEFAULT_EXTRA_DIMS, from_loader, from_loaders, TensorLoader
@dataclasses.dataclass
class LabelLoader(TensorLoader):
categories: Optional[Sequence[str]]
def _parse_categories(categories):
if categories is None:
num_categories = int(torch.randint(1, 11, ()))
elif isinstance(categories, int):
num_categories = categories
categories = [f"category{idx}" for idx in range(num_categories)]
elif isinstance(categories, collections.abc.Sequence) and all(isinstance(category, str) for category in categories):
categories = list(categories)
num_categories = len(categories)
else:
raise pytest.UsageError(
f"`categories` can either be `None` (default), an integer, or a sequence of strings, "
f"but got '{categories}' instead."
)
return categories, num_categories
def make_label_loader(*, extra_dims=(), categories=None, dtype=torch.int64):
categories, num_categories = _parse_categories(categories)
def fn(shape, dtype, device):
# The idiom `make_tensor(..., dtype=torch.int64).to(dtype)` is intentional to only get integer values,
# regardless of the requested dtype, e.g. 0 or 0.0 rather than 0 or 0.123
data = torch.testing.make_tensor(shape, low=0, high=num_categories, dtype=torch.int64, device=device).to(dtype)
return datapoints.Label(data, categories=categories)
return LabelLoader(fn, shape=extra_dims, dtype=dtype, categories=categories)
make_label = from_loader(make_label_loader)
@dataclasses.dataclass
class OneHotLabelLoader(TensorLoader):
categories: Optional[Sequence[str]]
def make_one_hot_label_loader(*, categories=None, extra_dims=(), dtype=torch.int64):
categories, num_categories = _parse_categories(categories)
def fn(shape, dtype, device):
if num_categories == 0:
data = torch.empty(shape, dtype=dtype, device=device)
else:
# The idiom `make_label_loader(..., dtype=torch.int64); ...; one_hot(...).to(dtype)` is intentional
# since `one_hot` only supports int64
label = make_label_loader(extra_dims=extra_dims, categories=num_categories, dtype=torch.int64).load(device)
data = one_hot(label, num_classes=num_categories).to(dtype)
return datapoints.OneHotLabel(data, categories=categories)
return OneHotLabelLoader(fn, shape=(*extra_dims, num_categories), dtype=dtype, categories=categories)
def make_one_hot_label_loaders(
*,
categories=(1, 0, None),
extra_dims=DEFAULT_EXTRA_DIMS,
dtypes=(torch.int64, torch.float32),
):
for params in combinations_grid(categories=categories, extra_dims=extra_dims, dtype=dtypes):
yield make_one_hot_label_loader(**params)
make_one_hot_labels = from_loaders(make_one_hot_label_loaders)
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
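# Hedged note (assumed workflow, not part of the config): MMDetection configs
# like this one are typically launched via `python tools/train.py <config>.py`.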
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
topk (int | tuple[int], optional): If the predictions in ``topk``
            match the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Defaults to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
        accu = [pred.new_tensor(0.) for _ in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
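# Hedged worked example (not part of the original module): row 0 predicts
# class 1 (correct), row 1 predicts class 0 (wrong), so top-1 accuracy is 50%.
if __name__ == '__main__':
    import torch
    pred = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    target = torch.tensor([1, 1])
    print(accuracy(pred, target))  # tensor([50.])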
|
import mmcv
import torch.nn as nn
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
topk (int | tuple[int], optional): If the predictions in ``topk``
            match the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Defaults to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
        accu = [pred.new_tensor(0.) for _ in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 100
model = dict(
type='SparseRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=4),
rpn_head=dict(
type='EmbeddingRPNHead',
num_proposals=num_proposals,
proposal_feature_channel=256),
roi_head=dict(
type='SparseRoIHead',
num_stages=num_stages,
stage_loss_weights=[1] * num_stages,
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='DIIHead',
num_classes=80,
num_ffn_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
in_channels=256,
dropout=0.0,
ffn_act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=False,
target_means=[0., 0., 0., 0.],
target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
]),
# training and testing settings
train_cfg=dict(
rpn=None,
rcnn=[
dict(
assigner=dict(
type='HungarianAssigner',
# TODO update
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxL1Cost', weight=5.0),
iou_cost=dict(type='IoUCost', iou_mode='giou',
weight=2.0)),
sampler=dict(type='PseudoSampler'),
pos_weight=1) for _ in range(num_stages)
]),
test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001)
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 100
model = dict(
type='SparseRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=4),
rpn_head=dict(
type='EmbeddingRPNHead',
num_proposals=num_proposals,
proposal_feature_channel=256),
roi_head=dict(
type='SparseRoIHead',
num_stages=num_stages,
stage_loss_weights=[1] * num_stages,
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='DIIHead',
num_classes=80,
num_ffn_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
in_channels=256,
dropout=0.0,
ffn_act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=False,
target_means=[0., 0., 0., 0.],
target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
]),
# training and testing settings
train_cfg=dict(
rpn=None,
rcnn=[
dict(
assigner=dict(
type='HungarianAssigner',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxL1Cost', weight=5.0),
iou_cost=dict(type='IoUCost', iou_mode='giou',
weight=2.0)),
sampler=dict(type='PseudoSampler'),
pos_weight=1) for _ in range(num_stages)
]),
test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001)
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
from typing import Any, List, Literal, Optional
import numpy as np
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from fastembed import TextEmbedding
class FastEmbedEmbedding(BaseEmbedding):
"""
Qdrant FastEmbedding models.
    FastEmbed is a lightweight, fast Python library built for embedding generation.
See more documentation at:
* https://github.com/qdrant/fastembed/
* https://qdrant.github.io/fastembed/.
To use this class, you must install the `fastembed` Python package.
`pip install fastembed`
Example:
from llama_index.embeddings.fastembed import FastEmbedEmbedding
fastembed = FastEmbedEmbedding()
"""
model_name: str = Field(
"BAAI/bge-small-en-v1.5",
description="Name of the FastEmbedding model to use.\n"
"Defaults to 'BAAI/bge-small-en-v1.5'.\n"
"Find the list of supported models at "
"https://qdrant.github.io/fastembed/examples/Supported_Models/",
)
max_length: int = Field(
512,
description="The maximum number of tokens. Defaults to 512.\n"
"Unknown behavior for values > 512.",
)
cache_dir: Optional[str] = Field(
None,
description="The path to the cache directory.\n"
"Defaults to `local_cache` in the parent directory",
)
threads: Optional[int] = Field(
None,
description="The number of threads single onnxruntime session can use.\n"
"Defaults to None",
)
doc_embed_type: Literal["default", "passage"] = Field(
"default",
description="Type of embedding method to use for documents.\n"
"Available options are 'default' and 'passage'.",
)
providers: Optional[List[str]] = Field(
default=None,
description="The ONNX providers to use for the embedding model.",
)
_model: Any = PrivateAttr()
@classmethod
    def class_name(cls) -> str:
return "FastEmbedEmbedding"
def __init__(
self,
model_name: Optional[str] = "BAAI/bge-small-en-v1.5",
max_length: Optional[int] = 512,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
doc_embed_type: Literal["default", "passage"] = "default",
providers: Optional[List[str]] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
max_length=max_length,
threads=threads,
doc_embed_type=doc_embed_type,
providers=providers,
**kwargs,
)
self._model = TextEmbedding(
model_name=model_name,
max_length=max_length,
cache_dir=cache_dir,
threads=threads,
providers=providers,
**kwargs,
)
def _get_text_embedding(self, text: str) -> List[float]:
embeddings: List[np.ndarray]
if self.doc_embed_type == "passage":
embeddings = list(self._model.passage_embed(text))
else:
embeddings = list(self._model.embed(text))
return embeddings[0].tolist()
def _get_query_embedding(self, query: str) -> List[float]:
query_embeddings: np.ndarray = next(self._model.query_embed(query))
return query_embeddings.tolist()
async def _aget_query_embedding(self, query: str) -> List[float]:
return self._get_query_embedding(query)
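# Hedged usage sketch (assumes `fastembed` is installed and the default
# BAAI/bge-small-en-v1.5 weights can be fetched on first use):
if __name__ == "__main__":
    emb = FastEmbedEmbedding()
    q_vec = emb.get_query_embedding("what is fastembed?")
    d_vec = emb.get_text_embedding("FastEmbed is a lightweight embedding library.")
    print(len(q_vec), len(d_vec))  # 384 for bge-small-en-v1.5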
|
from typing import Any, List, Literal, Optional
import numpy as np
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from fastembed import TextEmbedding
class FastEmbedEmbedding(BaseEmbedding):
"""
Qdrant FastEmbedding models.
    FastEmbed is a lightweight, fast Python library built for embedding generation.
See more documentation at:
* https://github.com/qdrant/fastembed/
* https://qdrant.github.io/fastembed/.
To use this class, you must install the `fastembed` Python package.
`pip install fastembed`
Example:
from llama_index.embeddings.fastembed import FastEmbedEmbedding
fastembed = FastEmbedEmbedding()
"""
model_name: str = Field(
"BAAI/bge-small-en-v1.5",
description="Name of the FastEmbedding model to use.\n"
"Defaults to 'BAAI/bge-small-en-v1.5'.\n"
"Find the list of supported models at "
"https://qdrant.github.io/fastembed/examples/Supported_Models/",
)
max_length: int = Field(
512,
description="The maximum number of tokens. Defaults to 512.\n"
"Unknown behavior for values > 512.",
)
cache_dir: Optional[str] = Field(
None,
description="The path to the cache directory.\n"
"Defaults to `local_cache` in the parent directory",
)
threads: Optional[int] = Field(
None,
description="The number of threads single onnxruntime session can use.\n"
"Defaults to None",
)
doc_embed_type: Literal["default", "passage"] = Field(
"default",
description="Type of embedding method to use for documents.\n"
"Available options are 'default' and 'passage'.",
)
providers: Optional[List[str]] = Field(
default=None,
description="The ONNX providers to use for the embedding model.",
)
_model: Any = PrivateAttr()
@classmethod
    def class_name(cls) -> str:
return "FastEmbedEmbedding"
def __init__(
self,
model_name: Optional[str] = "BAAI/bge-small-en-v1.5",
max_length: Optional[int] = 512,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
doc_embed_type: Literal["default", "passage"] = "default",
providers: Optional[List[str]] = None,
):
super().__init__(
model_name=model_name,
max_length=max_length,
threads=threads,
doc_embed_type=doc_embed_type,
providers=providers,
)
self._model = TextEmbedding(
model_name=model_name,
max_length=max_length,
cache_dir=cache_dir,
threads=threads,
providers=providers,
)
def _get_text_embedding(self, text: str) -> List[float]:
embeddings: List[np.ndarray]
if self.doc_embed_type == "passage":
embeddings = list(self._model.passage_embed(text))
else:
embeddings = list(self._model.embed(text))
return embeddings[0].tolist()
def _get_query_embedding(self, query: str) -> List[float]:
query_embeddings: np.ndarray = next(self._model.query_embed(query))
return query_embeddings.tolist()
async def _aget_query_embedding(self, query: str) -> List[float]:
return self._get_query_embedding(query)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
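# Hedged usage sketch (not part of the original module; assumes the DTD
# archive can be downloaded into `root` on first use):
if __name__ == "__main__":
    dataset = DTD("/tmp/dtd", split="train", fold=1)
    sample = next(iter(dataset))
    print(sample["label"], sample["path"])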
|
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import EncodedImage, Label
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.imdb import get_word_index as get_word_index
from keras.src.datasets.imdb import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.imdb import get_word_index
from keras.src.datasets.imdb import load_data
|
import pathlib
from typing import Any, BinaryIO, Optional, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> list[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: tuple[str, BinaryIO]) -> tuple[tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: tuple[tuple[str, BinaryIO], Optional[dict[str, Any]]]) -> dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
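# A minimal usage sketch, assuming the registry name "clevr" and the sample
# layout from `_prepare_sample` above; for the "test" split the label (the
# number of scene objects) is None because no scene annotations exist.
if __name__ == "__main__":
    from torchvision.prototype import datasets

    clevr = datasets.load("clevr", split="val")
    sample = next(iter(clevr))
    print(sample["path"], sample["label"])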
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
|
from typing import Any
from langchain_core.agents import AgentAction
from langchain_core.prompts.chat import ChatPromptTemplate
class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
"""Chat prompt template for the agent scratchpad."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
def _construct_agent_scratchpad(
self,
intermediate_steps: list[tuple[AgentAction, str]],
) -> str:
if len(intermediate_steps) == 0:
return ""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
return (
f"This was your previous work "
f"(but I haven't seen any of it! I only see what "
f"you return as final answer):\n{thoughts}"
)
def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]:
intermediate_steps = kwargs.pop("intermediate_steps")
kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(
intermediate_steps,
)
return kwargs
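# A short illustration of the scratchpad formatting above, using a hypothetical
# tool call; `agent_scratchpad` must be one of the template's input variables
# for `_merge_partial_and_user_variables` to inject the constructed string.
if __name__ == "__main__":
    template = AgentScratchPadChatPromptTemplate.from_messages(
        [("human", "{input}\n{agent_scratchpad}")]
    )
    steps = [
        (
            AgentAction(tool="search", tool_input="weather", log="I should search."),
            "It is sunny.",
        )
    ]
    print(template._construct_agent_scratchpad(steps))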
|
from typing import Any
from langchain_core.agents import AgentAction
from langchain_core.prompts.chat import ChatPromptTemplate
class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
"""Chat prompt template for the agent scratchpad."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
def _construct_agent_scratchpad(
self, intermediate_steps: list[tuple[AgentAction, str]]
) -> str:
if len(intermediate_steps) == 0:
return ""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
return (
f"This was your previous work "
f"(but I haven't seen any of it! I only see what "
f"you return as final answer):\n{thoughts}"
)
def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]:
intermediate_steps = kwargs.pop("intermediate_steps")
kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(
intermediate_steps
)
return kwargs
|
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
input: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_: Any
) -> Union[BaseModel, dict]:
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
else:
params = cast("dict", schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
FakeStructuredChatModel.model_rebuild()
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
assert prompt.messages[0].prompt.template_format == "mustache" # type: ignore[union-attr, union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
input: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_: Any
) -> Union[BaseModel, dict]:
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
else:
params = cast(dict, schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
FakeStructuredChatModel.model_rebuild()
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
assert prompt.messages[0].prompt.template_format == "mustache" # type: ignore[union-attr, union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer', 'PyramidVisionTransformerV2'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer'
]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, ForwardRef, Optional, Union
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
from docarray.typing.tensor.abstract_tensor import AbstractTensor
return isinstance(type_, type) and safe_issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or safe_issubclass(t, type(None)))
for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
    Traditional 'issubclass' calls can result in a crash if the input is a non-class type (e.g. list/tuple).
    :param x: A class 'x'
    :param a_tuple: A class, or a tuple of classes.
    :return: A boolean value - 'True' if 'x' is a subclass of 'a_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
origin = get_origin(x)
if origin: # If x is a generic type like DocList[SomeDoc], get its origin
x = origin
    if (
        (origin in (list, tuple, dict, set, Union))
        or is_typevar(x)
        or (type(x) == ForwardRef)
    ):
return False
return isinstance(x, type) and issubclass(x, a_tuple)
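# A few illustrative checks using only the helpers above. Generic aliases such
# as list[int] would make the built-in issubclass raise a TypeError, whereas
# safe_issubclass simply returns False for them.
if __name__ == "__main__":
    from typing import Optional

    from docarray.typing import NdArray

    print(is_type_tensor(NdArray))  # True
    print(is_tensor_union(Optional[NdArray]))  # True
    print(safe_issubclass(list[int], tuple))  # False instead of TypeError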
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, ForwardRef, Optional, Union
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
from docarray.typing.tensor.abstract_tensor import AbstractTensor
return isinstance(type_, type) and safe_issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or safe_issubclass(t, type(None)))
for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
    Traditional 'issubclass' calls can result in a crash if the input is a non-class type (e.g. list/tuple).
    :param x: A class 'x'
    :param a_tuple: A class, or a tuple of classes.
    :return: A boolean value - 'True' if 'x' is a subclass of 'a_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
    if (
        (get_origin(x) in (list, tuple, dict, set, Union))
        or is_typevar(x)
        or (type(x) == ForwardRef)
    ):
return False
return issubclass(x, a_tuple)
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As dataset, we use sentence-transformers/msmarco-bm25, which contains triplets mined from MS MARCO with BM25.
As loss function, we use MultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
logging.info("Model max length: %s", model.max_seq_length)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/sentence-transformers/msmarco-bm25
logging.info("Read the MS MARCO training dataset")
full_dataset = load_dataset("sentence-transformers/msmarco-bm25", "triplet", split="train").select(range(100000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
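# A hedged inference sketch (not called by default): `encode` yields sparse
# embeddings and `similarity` scores documents against a query. The default
# path assumes the `final_output_dir` layout produced by main().
def demo_inference(model_dir: str = "models/splade-distilbert-base-uncased-msmarco-mrl/final"):
    model = SparseEncoder(model_dir)
    query_embeddings = model.encode(["what is a sparse encoder?"])
    document_embeddings = model.encode(["A sparse encoder maps text to sparse lexical vectors."])
    print(model.similarity(query_embeddings, document_embeddings))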
if __name__ == "__main__":
main()
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As dataset, we use sentence-transformers/msmarco-bm25, which contains triplets mined from MS MARCO with BM25.
As loss function, we use MultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
logging.info("Model max length: %s", model.max_seq_length)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/sentence-transformers/msmarco-bm25
logging.info("Read the MS MARCO training dataset")
full_dataset = load_dataset("sentence-transformers/quora-duplicates", "triplet", split="train").select(
range(100000)
)
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
import inspect
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import STFTInitializer
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Identity,
Ones,
STFTInitializer,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
RandomNormal,
TruncatedNormal,
RandomUniform,
VarianceScaling,
OrthogonalInitializer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# Aliases
ALL_OBJECTS_DICT.update(
{
"uniform": RandomUniform,
"normal": RandomNormal,
"orthogonal": OrthogonalInitializer,
"Orthogonal": OrthogonalInitializer, # Legacy
"one": Ones,
"zero": Zeros,
}
)
@keras_export("keras.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
    The `identifier` may be the string name of an initializer function or class
    (case-sensitively).
>>> identifier = 'Ones'
>>> keras.initializers.deserialize(identifier)
<...keras.initializers.initializers.Ones...>
    You can also specify the `config` of the initializer to this function by
    passing a dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras.initializers.deserialize(cfg)
<...keras.initializers.initializers.Ones...>
In the case that the `identifier` is a class, this method will return a new
instance of the class by its constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
        Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
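# A small round-trip sketch using the public helpers above: `get` accepts class
# names, snake_case names, aliases, and config dicts, while `serialize` and
# `deserialize` invert each other for the built-in initializers.
if __name__ == "__main__":
    init = get("glorot_uniform")
    restored = deserialize(serialize(init))
    print(type(init).__name__, type(restored).__name__)  # GlorotUniform twice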
|
import inspect
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Identity,
Ones,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
RandomNormal,
TruncatedNormal,
RandomUniform,
VarianceScaling,
OrthogonalInitializer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# Aliases
ALL_OBJECTS_DICT.update(
{
"uniform": RandomUniform,
"normal": RandomNormal,
"orthogonal": OrthogonalInitializer,
"Orthogonal": OrthogonalInitializer, # Legacy
"one": Ones,
"zero": Zeros,
}
)
@keras_export("keras.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
    The `identifier` may be the string name of an initializer function or class
    (case-sensitively).
>>> identifier = 'Ones'
>>> keras.initializers.deserialize(identifier)
<...keras.initializers.initializers.Ones...>
    You can also specify the `config` of the initializer to this function by
    passing a dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras.initializers.deserialize(cfg)
<...keras.initializers.initializers.Ones...>
In the case that the `identifier` is a class, this method will return a new
instance of the class by its constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
        Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
|
# mypy: allow-untyped-defs
from typing import Any, NamedTuple, Optional
import torch
from torch.fx._compatibility import compatibility
from torch.fx.graph import Graph
from torch.fx.graph_module import GraphModule
from torch.fx.node import map_arg, Node, Target
from torch.fx.passes.shape_prop import ShapeProp
__all__ = [
"replace_target_nodes_with",
"size_bytes",
"get_size_of_all_nodes",
"get_tensor_meta",
"get_size_of_node",
]
@compatibility(is_backward_compatible=False)
def replace_target_nodes_with(
fx_module: GraphModule,
old_op: str,
old_target: Target,
new_op: str,
new_target: Target,
):
"""Modifies all nodes in fx_module.graph.nodes which match the specified op code and target,
and updates them to match the new op code and target"""
new_graph = Graph()
val_map: dict[Node, Node] = {}
for node in fx_module.graph.nodes:
if node.op == old_op and node.target == old_target:
args = map_arg(node.args, lambda n: val_map[n])
kwargs = map_arg(node.kwargs, lambda n: val_map[n])
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
val_map[node] = new_graph.create_node(
new_op, new_target, args, kwargs, node.name
)
else:
val_map[node] = new_graph.node_copy(node, lambda n: val_map[n])
fx_module.graph = new_graph
@compatibility(is_backward_compatible=False)
class size_bytes(NamedTuple):
output_size: int
total_size: int
@compatibility(is_backward_compatible=False)
def get_size_of_all_nodes(
fx_module: GraphModule, args: Optional[list[torch.Tensor]] = None
) -> None:
"""Given a fx graph module, update each node with its total size (weights + bias + output)
and its output_size(output). For a non-module node, the total size is the output size.
return total size"""
if args is not None:
# Mark shape and dtype for each node (node.shape and node.dtype)
ShapeProp(fx_module).propagate(*args)
# Calculate the total size of the whole fx graph
for node in fx_module.graph.nodes:
if node.op == "output":
break
node.size_bytes = get_size_of_node(fx_module, node)
return
@compatibility(is_backward_compatible=False)
def get_tensor_meta(node: Node) -> Any:
tensor_meta = node.meta.get("tensor_meta")
if not tensor_meta:
raise RuntimeError(
f"Node {node} has no tensor metadata associated with it! "
f"Check that shape propagation has run."
)
return tensor_meta
@compatibility(is_backward_compatible=False)
def get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:
"""Given a node with node.dtype and node.shape, return its total size and its output size.
total_size = weights + bias + output_size
"""
# Total num of elements
total_num_of_elems = 0
# For a module, consider all parameters
if node.op == "call_module":
submodule_dict = dict(fx_module.named_modules())
submodule = submodule_dict[node.target]
parameters = submodule.named_parameters()
# Parameters are named tuples
for _name, p in parameters:
total_num_of_elems += p.numel()
# Don't forget the output size
# node.shape is the shape of this node's output
tensor_meta = get_tensor_meta(node)
output_elem = tensor_meta.shape.numel()
total_num_of_elems += output_elem
# Assume for now if it's quantized then it's qint8 or quint8
if tensor_meta.is_quantized:
size_per_elem_bytes = torch._empty_affine_quantized(
[], dtype=tensor_meta.dtype
).element_size()
else:
size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
total_size = size_per_elem_bytes * total_num_of_elems
output_size = size_per_elem_bytes * output_elem
return size_bytes(output_size, total_size)
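# A minimal sketch of the intended flow: symbolically trace a module, let
# get_size_of_all_nodes attach `size_bytes` to every non-output node (shape
# propagation runs because sample args are passed), then read the sizes off.
if __name__ == "__main__":
    from torch.fx import symbolic_trace

    class Small(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = torch.nn.Linear(4, 8)

        def forward(self, x):
            return self.fc(x)

    gm = symbolic_trace(Small())
    get_size_of_all_nodes(gm, [torch.randn(2, 4)])
    for node in gm.graph.nodes:
        if node.op != "output":
            print(node.name, node.size_bytes)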
|
# mypy: allow-untyped-defs
from typing import Any, NamedTuple, Optional
import torch
from torch.fx._compatibility import compatibility
from torch.fx.graph import Graph
from torch.fx.graph_module import GraphModule
from torch.fx.node import map_arg, Node, Target
from torch.fx.passes.shape_prop import ShapeProp
__all__ = [
"replace_target_nodes_with",
"size_bytes",
"get_size_of_all_nodes",
"get_tensor_meta",
"get_size_of_node",
]
@compatibility(is_backward_compatible=False)
def replace_target_nodes_with(
fx_module: GraphModule,
old_op: str,
old_target: Target,
new_op: str,
new_target: Target,
):
"""Modifies all nodes in fx_module.graph.nodes which match the specified op code and target,
and updates them to match the new op code and target"""
new_graph = Graph()
val_map: dict[Node, Node] = {}
for node in fx_module.graph.nodes:
if node.op == old_op and node.target == old_target:
args = map_arg(node.args, lambda n: val_map[n])
kwargs = map_arg(node.kwargs, lambda n: val_map[n])
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
val_map[node] = new_graph.create_node(
new_op, new_target, args, kwargs, node.name
)
else:
val_map[node] = new_graph.node_copy(node, lambda n: val_map[n])
fx_module.graph = new_graph
@compatibility(is_backward_compatible=False)
class size_bytes(NamedTuple):
output_size: int
total_size: int
@compatibility(is_backward_compatible=False)
def get_size_of_all_nodes(
fx_module: GraphModule, args: Optional[list[torch.Tensor]] = None
) -> None:
"""Given a fx graph module, update each node with its total size (weights + bias + output)
and its output_size(output). For a non-module node, the total size is the output size.
return total size"""
if args is not None:
# Mark shape and dtype for each node (node.shape and node.dtype)
ShapeProp(fx_module).propagate(*args)
# Calculate the total size of the whole fx graph
for node in fx_module.graph.nodes:
if node.op == "output":
break
node.size_bytes = get_size_of_node(fx_module, node)
return
@compatibility(is_backward_compatible=False)
def get_tensor_meta(node: Node) -> Any:
tensor_meta = node.meta.get("tensor_meta")
if not tensor_meta:
raise RuntimeError(
f"Node {node} has no tensor metadata associated with it! "
f"Check that shape propagation has run."
)
return tensor_meta
@compatibility(is_backward_compatible=False)
def get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:
"""Given a node with node.dtype and node.shape, return its total size and its output size.
total_size = weights + bias + output_size
"""
# Total num of elements
total_num_of_elems = 0
    # For a module, consider all parameters
if node.op == "call_module":
submodule_dict = dict(fx_module.named_modules())
submodule = submodule_dict[node.target]
parameters = submodule.named_parameters()
# Parameters are named tuples
for _name, p in parameters:
total_num_of_elems += p.numel()
# Don't forget the output size
# node.shape is the shape of this node's output
tensor_meta = get_tensor_meta(node)
output_elem = tensor_meta.shape.numel()
total_num_of_elems += output_elem
# Assume for now if it's quantized then it's qint8 or quint8
if tensor_meta.is_quantized:
size_per_elem_bytes = torch._empty_affine_quantized(
[], dtype=tensor_meta.dtype
).element_size()
else:
size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
total_size = size_per_elem_bytes * total_num_of_elems
output_size = size_per_elem_bytes * output_elem
return size_bytes(output_size, total_size)
|
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
from jinja2 import BaseLoader
from jinja2.sandbox import SandboxedEnvironment
class TextFormatter:
def __init__(self):
# Create a sandboxed environment
self.env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True)
# Clear any registered filters, tests, and globals to minimize attack surface
self.env.filters.clear()
self.env.tests.clear()
self.env.globals.clear()
def format_string(self, template_str: str, values=None, **kwargs) -> str:
template = self.env.from_string(template_str)
return template.render(values or {}, **kwargs)
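# A short usage sketch: the sandboxed environment renders ordinary Jinja2
# placeholders, blocks access to Python internals, and (with autoescape on)
# HTML-escapes substituted values.
if __name__ == "__main__":
    formatter = TextFormatter()
    print(formatter.format_string("Hello {{ name }}!", {"name": "<world>"}))
    # -> Hello &lt;world&gt;!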
|
import re
from jinja2 import BaseLoader
from jinja2.sandbox import SandboxedEnvironment
class TextFormatter:
def __init__(self):
# Create a sandboxed environment
self.env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True)
# Clear any registered filters, tests, and globals to minimize attack surface
self.env.filters.clear()
self.env.tests.clear()
self.env.globals.clear()
def format_string(self, template_str: str, values=None, **kwargs) -> str:
        # For str.format-style compatibility: wrap each bare {name} placeholder
        # as {{name}} so Jinja2 substitutes it, but avoid turning existing
        # {{...}} expressions into {{{...}}}.
template_str = re.sub(r"(?<!{){[ a-zA-Z0-9_]+}", r"{\g<0>}", template_str)
template = self.env.from_string(template_str)
return template.render(values or {}, **kwargs)
|
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar, Union
import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing import AudioNdArray, NdArray
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
    URL to a video file.
    Can be a remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
                f'Video URL must have one of the following extensions: '
f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
        Load the data from the URL into a named tuple of VideoNdArray,
        AudioNdArray and NdArray.
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: AudioNdArray representing the audio content, VideoNdArray representing
the images of the video, NdArray of the key frame indices.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDocument):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
You can load only the key frames (or video, audio respectively):
.. code-block:: python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
"""
import av
with av.open(self, **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
                        # the frame was just appended, so its index is len - 1
                        curr_index = len(video_frames) - 1
                        keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar, Union
import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing import AudioNdArray, NdArray
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
class VideoUrl(AnyUrl):
"""
    URL to a video file.
    Can be a remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(video_url=str(self))
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
                f'Video URL must have one of the following extensions: '
f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
        Load the data from the URL into a named tuple of VideoNdArray,
        AudioNdArray and NdArray.
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: AudioNdArray representing the audio content, VideoNdArray representing
the images of the video, NdArray of the key frame indices.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDocument
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDocument):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
You can load only the key frames (or video, audio respectively):
.. code-block:: python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
"""
import av
with av.open(self, **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
                        # the frame was just appended, so its index is len - 1
                        curr_index = len(video_frames) - 1
                        keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from .env import EnvironmentCommand
from .fp16_safetensors import FP16SafetensorsCommand
def main():
parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
# Register commands
EnvironmentCommand.register_subcommand(commands_parser)
FP16SafetensorsCommand.register_subcommand(commands_parser)
# Let's go
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
# Run
service = args.func(args)
service.run()
if __name__ == "__main__":
main()
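# Usage sketch (hedged): with diffusers installed, this module is exposed as the
# `diffusers-cli` console entry point, e.g. `diffusers-cli env` prints
# environment info for bug reports (see EnvironmentCommand).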
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from .env import EnvironmentCommand
from .fp16_safetensors import FP16SafetensorsCommand
def main():
parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
# Register commands
EnvironmentCommand.register_subcommand(commands_parser)
FP16SafetensorsCommand.register_subcommand(commands_parser)
# Let's go
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
# Run
service = args.func(args)
service.run()
if __name__ == "__main__":
main()
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=FlairTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
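    # The containerized executor is expected to keep serving; reaching the 30s
    # timeout (TimeoutExpired) is the success condition of this test.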
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
|
from jina import Flow, Document, DocumentArray
from ...flair_text import FlairTextEncoder
def data_generator(num_docs):
for i in range(num_docs):
        doc = Document(text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
data = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = data[0].docs
for doc in docs:
assert doc.embedding.shape == (100,)
|
__version__ = '0.16.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
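# Example (sketch): opt in to rich tracebacks by setting the env var before
# importing docarray, e.g. `DA_RICH_HANDLER=1 python my_app.py`.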
|
__version__ = '0.16.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DDOD(SingleStageDetector):
"""Implementation of `DDOD <https://arxiv.org/pdf/2107.02963.pdf>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of DDOD. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of DDOD. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
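# Construction sketch (hedged: the concrete backbone/neck/head settings below
# are illustrative assumptions, not values shipped with this class):
#   model = DDOD(
#       backbone=dict(type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3)),
#       neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
#       bbox_head=dict(type='DDODHead', num_classes=80, in_channels=256))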
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class DDOD(SingleStageDetector):
"""Implementation of `DDOD <https://arxiv.org/pdf/2107.02963.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DDOD, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from typing import Optional, Sequence
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import QdrantDocumentIndex
from docarray.typing import NdArray
from tests.index.qdrant.fixtures import qdrant, qdrant_config # noqa: F401
class SimpleDoc(BaseDoc):
embedding: NdArray[4] = Field(space='cosine') # type: ignore[valid-type]
text: Optional[str]
@pytest.fixture
def index_docs() -> Sequence[SimpleDoc]:
index_docs = [SimpleDoc(embedding=np.zeros(4), text=f'Test {i}') for i in range(10)]
return index_docs
@pytest.mark.parametrize('limit', [1, 5, 10])
def test_dict_limit(qdrant_config, index_docs, limit): # noqa: F811
index = QdrantDocumentIndex[SimpleDoc](db_config=qdrant_config)
index.index(index_docs)
# Search test
query = {
'vector': ('embedding', [1.0, 0.0, 0.0, 0.0]),
'limit': limit,
'with_vectors': True,
}
points = index.execute_query(query=query)
assert points is not None
assert len(points) == limit
# Scroll test
query = {
'limit': limit,
'with_vectors': True,
}
points = index.execute_query(query=query)
assert points is not None
assert len(points) == limit
def test_dict_full_text_filter(qdrant_config, index_docs): # noqa: F811
index = QdrantDocumentIndex[SimpleDoc](db_config=qdrant_config)
index.index(index_docs)
# Search test
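    # Full-text match on the token '2': among the ten docs only 'Test 2'
    # contains it, hence exactly one expected hit below.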
query = {
'filter': {'must': [{'key': 'text', 'match': {'text': '2'}}]},
'params': {'hnsw_ef': 128, 'exact': False},
'vector': ('embedding', [1.0, 0.0, 0.0, 0.0]),
'limit': 3,
'with_vectors': True,
}
points = index.execute_query(query=query)
assert points is not None
assert len(points) == 1
assert points[0].id == index_docs[2].id
# Scroll test
query = {
'filter': {'must': [{'key': 'text', 'match': {'text': '2'}}]},
'params': {'hnsw_ef': 128, 'exact': False},
'limit': 3,
'with_vectors': True,
}
points = index.execute_query(query=query)
assert points is not None
assert len(points) == 1
assert points[0].id == index_docs[2].id
|
import numpy as np
from typing import Optional, Sequence
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import QdrantDocumentIndex
from docarray.typing import NdArray
from .fixtures import qdrant_config, qdrant
class SimpleDoc(BaseDoc):
embedding: NdArray[4] = Field(space='cosine') # type: ignore[valid-type]
text: Optional[str]
@pytest.fixture
def index_docs() -> Sequence[SimpleDoc]:
index_docs = [SimpleDoc(embedding=np.zeros(4), text=f'Test {i}') for i in range(10)]
return index_docs
@pytest.mark.parametrize('limit', [1, 5, 10])
def test_dict_limit(qdrant_config, qdrant, index_docs, limit):
store = QdrantDocumentIndex[SimpleDoc](db_config=qdrant_config)
store.index(index_docs)
# Search test
query = {
'vector': ('embedding', [1.0, 0.0, 0.0, 0.0]),
'limit': limit,
'with_vectors': True,
}
points = store.execute_query(query=query)
assert points is not None
assert len(points) == limit
# Scroll test
query = {
'limit': limit,
'with_vectors': True,
}
points = store.execute_query(query=query)
assert points is not None
assert len(points) == limit
def test_dict_full_text_filter(qdrant_config, qdrant, index_docs):
store = QdrantDocumentIndex[SimpleDoc](db_config=qdrant_config)
store.index(index_docs)
# Search test
query = {
'filter': {'must': [{'key': 'text', 'match': {'text': '2'}}]},
'params': {'hnsw_ef': 128, 'exact': False},
'vector': ('embedding', [1.0, 0.0, 0.0, 0.0]),
'limit': 3,
'with_vectors': True,
}
points = store.execute_query(query=query)
assert points is not None
assert len(points) == 1
assert points[0].id == index_docs[2].id
# Scroll test
query = {
'filter': {'must': [{'key': 'text', 'match': {'text': '2'}}]},
'params': {'hnsw_ef': 128, 'exact': False},
'limit': 3,
'with_vectors': True,
}
points = store.execute_query(query=query)
assert points is not None
assert len(points) == 1
assert points[0].id == index_docs[2].id
|
# Copyright 2024 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanTransformer3DModel
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineTesterMixin,
)
enable_full_determinism()
class WanPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = WanPipeline
params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
test_xformers_attention = False
supports_dduf = False
def get_dummy_components(self):
torch.manual_seed(0)
vae = AutoencoderKLWan(
base_dim=3,
z_dim=16,
dim_mult=[1, 1, 1, 1],
num_res_blocks=1,
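            # (sic) 'temperal_downsample' is the parameter's actual spelling in diffusers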
temperal_downsample=[False, True, True],
)
torch.manual_seed(0)
# TODO: impl FlowDPMSolverMultistepScheduler
scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0)
text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0)
transformer = WanTransformer3DModel(
patch_size=(1, 2, 2),
num_attention_heads=2,
attention_head_dim=12,
in_channels=16,
out_channels=16,
text_dim=32,
freq_dim=256,
ffn_dim=32,
num_layers=2,
cross_attn_norm=True,
qk_norm="rms_norm_across_heads",
rope_max_seq_len=32,
)
components = {
"transformer": transformer,
"vae": vae,
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "dance monkey",
"negative_prompt": "negative", # TODO
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"height": 16,
"width": 16,
"num_frames": 9,
"max_sequence_length": 16,
"output_type": "pt",
}
return inputs
def test_inference(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
video = pipe(**inputs).frames
generated_video = video[0]
self.assertEqual(generated_video.shape, (9, 3, 16, 16))
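        # NOTE: with a 1e10 tolerance the comparison below is a placeholder;
        # the shape assertion above is the effective check.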
expected_video = torch.randn(9, 3, 16, 16)
max_diff = np.abs(generated_video - expected_video).max()
self.assertLessEqual(max_diff, 1e10)
@unittest.skip("Test not supported")
def test_attention_slicing_forward_pass(self):
pass
@slow
@require_torch_accelerator
class WanPipelineIntegrationTests(unittest.TestCase):
prompt = "A painting of a squirrel eating a burger."
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
@unittest.skip("TODO: test needs to be implemented")
def test_Wanx(self):
pass
|
# Copyright 2024 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_accelerator,
slow,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineTesterMixin,
)
enable_full_determinism()
class WanPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = WanPipeline
params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
test_xformers_attention = False
supports_dduf = False
def get_dummy_components(self):
torch.manual_seed(0)
vae = AutoencoderKLWan(
base_dim=3,
z_dim=16,
dim_mult=[1, 1, 1, 1],
num_res_blocks=1,
temperal_downsample=[False, True, True],
)
torch.manual_seed(0)
# TODO: impl FlowDPMSolverMultistepScheduler
scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0)
text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0)
transformer = WanTransformer3DModel(
patch_size=(1, 2, 2),
num_attention_heads=2,
attention_head_dim=12,
in_channels=16,
out_channels=16,
text_dim=32,
freq_dim=256,
ffn_dim=32,
num_layers=2,
cross_attn_norm=True,
qk_norm="rms_norm_across_heads",
rope_max_seq_len=32,
)
components = {
"transformer": transformer,
"vae": vae,
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "dance monkey",
"negative_prompt": "negative", # TODO
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"height": 16,
"width": 16,
"num_frames": 9,
"max_sequence_length": 16,
"output_type": "pt",
}
return inputs
def test_inference(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
video = pipe(**inputs).frames
generated_video = video[0]
self.assertEqual(generated_video.shape, (9, 3, 16, 16))
expected_video = torch.randn(9, 3, 16, 16)
max_diff = np.abs(generated_video - expected_video).max()
self.assertLessEqual(max_diff, 1e10)
@unittest.skip("Test not supported")
def test_attention_slicing_forward_pass(self):
pass
@slow
@require_torch_accelerator
class WanPipelineIntegrationTests(unittest.TestCase):
prompt = "A painting of a squirrel eating a burger."
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@unittest.skip("TODO: test needs to be implemented")
def test_Wanx(self):
pass
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import CSRLoss
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "sentence-transformers/all-mpnet-base-v2"
transformer = Transformer(model_name)
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=32, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2. Load the GooAQ dataset: https://huggingface.co/datasets/sentence-transformers/gooaq
logging.info("Read the gooaq training dataset")
full_dataset = load_dataset("sentence-transformers/gooaq", split="train").select(range(10000))
dataset_dict = full_dataset.train_test_split(test_size=1040, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# Initialize the NanoBEIR evaluator
evaluator = SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nq"],
show_progress_bar=True,
batch_size=32,
)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir="./sparse_encoder_nq",
num_train_epochs=3,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_steps=10,
logging_dir="./logs",
logging_steps=10,
evaluation_strategy="steps",
eval_steps=140,
save_strategy="steps",
save_steps=140,
load_best_model_at_end=True,
metric_for_best_model="NanoBEIR_mean_cosine_ndcg@10", # Using NDCG@10 as the primary metric
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
)
# Initialize the loss
loss = CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=1.0, # Scale for similarity computation
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator, # Add the NanoBEIR evaluator
)
# Train model
trainer.train()
# Save model
trainer.save_model("./sparse_encoder_nq_final")
# Test the trained model
test_sentences = [
"What is the capital of France?",
"Who wrote Romeo and Juliet?",
"What is the largest planet in our solar system?",
]
# Encode test sentences
embeddings = model.encode(test_sentences, convert_to_sparse_tensor=True)
# Get sparsity statistics
stats = model.get_sparsity_stats(embeddings)
logger.info(f"Sparsity statistics: {stats}")
# Compute similarities
similarities = model.similarity(embeddings, embeddings)
logger.info(f"Similarity matrix:\n{similarities}")
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.losses import CSRLoss
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "sentence-transformers/all-mpnet-base-v2"
transformer = Transformer(model_name)
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=32, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2. Load the GooAQ dataset: https://huggingface.co/datasets/sentence-transformers/gooaq
logging.info("Read the gooaq training dataset")
full_dataset = load_dataset("sentence-transformers/gooaq", split="train").select(range(10000))
dataset_dict = full_dataset.train_test_split(test_size=1040, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir="./sparse_encoder_nq",
num_train_epochs=3,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_steps=10,
logging_dir="./logs",
logging_steps=10,
evaluation_strategy="steps",
eval_steps=140,
save_strategy="steps",
save_steps=140,
load_best_model_at_end=True,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
)
# Initialize the loss
loss = CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=1.0, # Scale for similarity computation
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
)
# Train model
trainer.train()
# Save model
trainer.save_model("./sparse_encoder_nq_final")
# Test the trained model
test_sentences = [
"What is the capital of France?",
"Who wrote Romeo and Juliet?",
"What is the largest planet in our solar system?",
]
# Encode test sentences
embeddings = model.encode(test_sentences, convert_to_sparse_tensor=True)
# Get sparsity statistics
stats = model.get_sparsity_stats(embeddings)
logger.info(f"Sparsity statistics: {stats}")
# Compute similarities
similarities = model.similarity(embeddings, embeddings)
logger.info(f"Similarity matrix:\n{similarities}")
if __name__ == "__main__":
main()
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
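            # (sic) 'tempearture' matches the argument name in mmdet's NormedLinear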
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_val.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v1_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1203,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0)),
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_train.json',
img_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root,
pipeline=test_pipeline))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
|
"""Init file of LlamaIndex."""
__version__ = "0.12.28"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.28"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
"""Dataset for DeepFashion."""
METAINFO = {
'classes': ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants',
'bag', 'neckwear', 'headwear', 'eyeglass', 'belt',
'footwear', 'hair', 'skin', 'face'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),
(0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),
(128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),
(128, 0, 96), (128, 0, 192), (0, 32, 192)]
}
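if __name__ == '__main__':
    # Sanity-check sketch: every class name should have a matching palette color.
    meta = DeepFashionDataset.METAINFO
    assert len(meta['classes']) == len(meta['palette'])
    print(dict(zip(meta['classes'], meta['palette'])))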
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
"""Dataset for DeepFashion."""
METAINFO = {
'CLASSES': ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants',
'bag', 'neckwear', 'headwear', 'eyeglass', 'belt',
'footwear', 'hair', 'skin', 'face'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),
(0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),
(128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),
(128, 0, 96), (128, 0, 192), (0, 32, 192)]
}
|
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
from jina._docarray import docarray_v2
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
        :param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting it
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
if docarray_v2:
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
if isinstance(self._request_handler, GatewayRequestHandler):
await self._request_handler.streamer._get_endpoints_input_output_models()
self._request_handler.streamer._validate_flow_docarray_compatibility()
self.app = self._request_handler._websocket_fastapi_default_app(tracing=self.tracing, tracer_provider=self.tracer_provider)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
@property
def _should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self.server.should_exit
@property
def should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self._should_exit
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().shutdown()
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
        :param uvicorn_kwargs: Dictionary of keyword arguments that will be passed to the Uvicorn server when starting it
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
self.app = self._request_handler._websocket_fastapi_default_app(tracing=self.tracing, tracer_provider=self.tracer_provider)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
@property
def _should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self.server.should_exit
@property
def should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self._should_exit
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().shutdown()
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset` (12 epochs x 3 repeats = 36 effective epochs)
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomResize', scale=[(1333, 640), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset` (12 epochs x 3 repeats = 36 effective epochs)
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
from typing import Union, Iterable, MutableSequence, Iterator
from docarray.array.storage.memory.backend import needs_id2offset_rebuild
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
@needs_id2offset_rebuild
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
"""
self._data.insert(index, value)
def _append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._data.append(value)
if not self._needs_id2offset_rebuild:
self._id_to_index[value.id] = len(self) - 1
def __eq__(self, other):
return (
type(self) is type(other)
and type(self._data) is type(other._data)
and self._data == other._data
)
def __len__(self):
return len(self._data)
def __iter__(self) -> Iterator['Document']:
yield from self._data
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return x in self._id2offset
elif isinstance(x, Document):
return x.id in self._id2offset
else:
return False
def __repr__(self):
return f'<DocumentArray (length={len(self)}) at {id(self)}>'
def __add__(self, other: Union['Document', Iterable['Document']]):
v = type(self)(self, copy=True)
v.extend(other)
return v
def _extend(self, values: Iterable['Document'], **kwargs) -> None:
values = list(values) # consume the iterator only once
last_idx = len(self._id2offset)
self._data.extend(values)
self._id_to_index.update({d.id: i + last_idx for i, d in enumerate(values)})
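if __name__ == '__main__':
    # Behavior sketch (hedged: assumes this mixin backs the default in-memory
    # DocumentArray): `+` copies, so the operands are left untouched.
    from docarray import DocumentArray
    da = DocumentArray([Document(text='a')])
    combined = da + DocumentArray([Document(text='b')])
    assert len(combined) == 2 and len(da) == 1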
|
from typing import Union, Iterable, Iterator
from docarray.array.storage.memory.backend import needs_id2offset_rebuild
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
@needs_id2offset_rebuild
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
"""
self._data.insert(index, value)
def _append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._data.append(value)
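        # Fast path: keep the id-to-offset index in sync only while it is
        # still valid; a pending rebuild would overwrite the entry anyway.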
if not self._needs_id2offset_rebuild:
self._id_to_index[value.id] = len(self) - 1
def __eq__(self, other):
return (
type(self) is type(other)
and type(self._data) is type(other._data)
and self._data == other._data
)
def __len__(self):
return len(self._data)
def __iter__(self) -> Iterator['Document']:
yield from self._data
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return x in self._id2offset
elif isinstance(x, Document):
return x.id in self._id2offset
else:
return False
def __repr__(self):
return f'<DocumentArray (length={len(self)}) at {id(self)}>'
def __add__(self, other: Union['Document', Iterable['Document']]):
v = type(self)(self)
v.extend(other)
return v
def _extend(self, values: Iterable['Document'], **kwargs) -> None:
values = list(values) # consume the iterator only once
last_idx = len(self._id2offset)
self._data.extend(values)
self._id_to_index.update({d.id: i + last_idx for i, d in enumerate(values)})
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
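# Only the training pipeline and the schedule are overridden below; all other
# settings are inherited from the 1x base config.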
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
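# Only the training pipeline and the schedule are overridden below; all other
# settings are inherited from the 1x base config.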
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
from docarray.typing import AnyEmbedding, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDoc):
"""
Document for handling point clouds for 3D data representation.
    A point cloud is a representation of a 3D mesh. It is made by repeatedly and
    uniformly sampling points on the surface of the 3D body. Compared to the mesh
    representation, a point cloud is a fixed-size ndarray (shape=(n_samples, 3)) and
    hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`),
    a PointsAndColors object (`PointCloud3D.tensors`), and an AnyEmbedding
    (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
pc.second_embedding = model(pc.tensors.colors)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDoc):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensors = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes_ = mmdoc.point_cloud.url.load_bytes()
    You can display your point cloud from either its url or its tensors:
.. code-block:: python
from docarray.documents import PointCloud3D
# display from url
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.url.display()
# display from tensors
pc.tensors = pc.url.load(samples=10000)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
"""
url: Optional[PointCloud3DUrl]
tensors: Optional[PointsAndColors]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
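        # Implicit casting: a plain string is interpreted as a URL, and a raw
        # tensor as the points of a `PointsAndColors` object.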
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensors=PointsAndColors(points=value))
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
from docarray.typing import AnyEmbedding, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
    A point cloud is a representation of a 3D mesh. It is made by repeatedly and
    uniformly sampling points on the surface of the 3D body. Compared to the mesh
    representation, a point cloud is a fixed-size ndarray (shape=(n_samples, 3)) and
    hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`),
    a PointsAndColors object (`PointCloud3D.tensors`), and an AnyEmbedding
    (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
pc.second_embedding = model(pc.tensors.colors)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensors = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes_ = mmdoc.point_cloud.url.load_bytes()
    You can display your point cloud from either its url or its tensors:
.. code-block:: python
from docarray.documents import PointCloud3D
# display from url
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.url.display()
# display from tensors
pc.tensors = pc.url.load(samples=10000)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
"""
url: Optional[PointCloud3DUrl]
tensors: Optional[PointsAndColors]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
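        # Implicit casting: a plain string is interpreted as a URL, and a raw
        # tensor as the points of a `PointsAndColors` object.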
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensors=PointsAndColors(points=value))
return super().validate(value)
|
from docarray import BaseDocument
from docarray.typing import ID
def test_set_id():
class MyDocument(BaseDocument):
id: ID
d = MyDocument(id="123")
assert isinstance(d.id, ID)
assert d.id == "123"
|
from docarray import Document
from docarray.typing import ID
def test_set_id():
class MyDocument(Document):
id: ID
d = MyDocument(id="123")
assert isinstance(d.id, ID)
assert d.id == "123"
|
from docarray import BaseDocument
from docarray.typing import ImageUrl
def test_set_image_url():
class MyDocument(BaseDocument):
image_url: ImageUrl
d = MyDocument(image_url="https://jina.ai/img.png")
assert isinstance(d.image_url, ImageUrl)
assert d.image_url == "https://jina.ai/img.png"
|
from docarray import Document
from docarray.typing import ImageUrl
def test_set_image_url():
class MyDocument(Document):
image_url: ImageUrl
d = MyDocument(image_url="https://jina.ai/img.png")
assert isinstance(d.image_url, ImageUrl)
assert d.image_url == "https://jina.ai/img.png"
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_mobilevit import *
from .feature_extraction_mobilevit import *
from .image_processing_mobilevit import *
from .image_processing_mobilevit_fast import *
from .modeling_mobilevit import *
from .modeling_tf_mobilevit import *
else:
import sys
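    # Swap this module for a lazy proxy so submodules are only imported on
    # first attribute access.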
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_mobilevit import *
from .feature_extraction_mobilevit import *
from .image_processing_mobilevit import *
from .modeling_mobilevit import *
from .modeling_tf_mobilevit import *
else:
import sys
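    # Swap this module for a lazy proxy so submodules are only imported on
    # first attribute access.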
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright (c) OpenMMLab. All rights reserved.
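# Re-export the root registries so downstream code can import them all from
# one place.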
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, MODEL_WRAPPERS,
MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS', 'MODEL_WRAPPERS'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, MODELS,
OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS,
RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS,
WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS'
]
|