input | output
---|---
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
pytestmark = [pytest.mark.mesh]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url: str):
mesh = Mesh3D(url=file_url)
mesh.tensors = mesh.url.load()
assert isinstance(mesh.tensors.vertices, np.ndarray)
assert isinstance(mesh.tensors.faces, np.ndarray)
def test_str_init():
t = parse_obj_as(Mesh3D, 'http://hello.ply')
assert t.url == 'http://hello.ply'
def test_doc():
class MyDoc(BaseDoc):
mesh1: Mesh3D
mesh2: Mesh3D
doc = MyDoc(mesh1='http://hello.ply', mesh2=Mesh3D(url='http://hello.ply'))
assert doc.mesh1.url == 'http://hello.ply'
assert doc.mesh2.url == 'http://hello.ply'
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
pytestmark = [pytest.mark.mesh]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url: str):
mesh = Mesh3D(url=file_url)
mesh.tensors = mesh.url.load()
assert isinstance(mesh.tensors.vertices, np.ndarray)
assert isinstance(mesh.tensors.faces, np.ndarray)
def test_str_init():
t = parse_obj_as(Mesh3D, 'http://hello.ply')
assert t.url == 'http://hello.ply'
def test_doc():
class MyDoc(BaseDoc):
mesh1: Mesh3D
mesh2: Mesh3D
doc = MyDoc(mesh1='http://hello.ply', mesh2=Mesh3D(url='http://hello.ply'))
assert doc.mesh1.url == 'http://hello.ply'
assert doc.mesh2.url == 'http://hello.ply'
|
from __future__ import annotations
from typing import Literal
from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseGISTEmbedLoss(GISTEmbedLoss):
def __init__(
self,
model: SparseEncoder,
guide: SparseEncoder,
temperature: float = 0.1,
margin_strategy: Literal["absolute", "relative"] = "absolute",
margin: float = 0.0,
) -> None:
"""
This loss is used to train a SparseEncoder model using the GISTEmbed algorithm.
It takes a model and a guide model as input, and uses the guide model to guide the
in-batch negative sample selection. The cosine similarity is used to compute the loss
and the temperature parameter is used to scale the cosine similarities.
You can apply different false-negative filtering strategies to discard hard negatives that are too similar to
the positive. Two strategies are supported:
- "absolute": Discards negatives whose similarity score is greater than or equal to ``positive_score - margin``.
- "relative": Discards negatives whose similarity score is greater than or equal to ``positive_score * (1 - margin)``.
Args:
model: SparseEncoder model based on a `transformers` model.
guide: SparseEncoder model to guide the in-batch negative sample selection.
temperature: Temperature parameter to scale the cosine similarities. The default of 0.1 is adapted for sparse embeddings and may need further tuning.
margin_strategy: Strategy used for false negative filtering. One of {"absolute", "relative"}.
margin: The margin value for filtering negatives. Defaults to 0.0, together with the "absolute" strategy,
this only removes negatives that are more similar to the query than the positive is to the query.
References:
- For further details, see: https://arxiv.org/abs/2402.16829
Requirements:
1. (anchor, positive, negative) triplets
2. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseMultipleNegativesRankingLoss` is similar to this loss, but it does not use
a guide model to guide the in-batch negative sample selection. `SparseGISTEmbedLoss` yields
a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
# Initialize the SPLADE model
model = SparseEncoder("distilbert/distilbert-base-uncased")
guide = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SparseGISTEmbedLoss(model, guide=guide)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(
model, guide=guide, temperature=temperature, margin_strategy=margin_strategy, margin=margin
)
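# Illustrative note (not part of the library): a worked example of the two
# false-negative filtering strategies documented above, with made-up scores.
# With positive_score = 0.8 and margin = 0.1:
#   "absolute": discard negatives scoring >= 0.8 - 0.1 = 0.70
#   "relative": discard negatives scoring >= 0.8 * 0.9 = 0.72
# A negative scoring 0.75 is discarded under both strategies; one scoring 0.50 is
# kept and contributes to the in-batch negatives.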
|
from __future__ import annotations
from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseGISTEmbedLoss(GISTEmbedLoss):
def __init__(
self,
model: SparseEncoder,
guide: SparseEncoder,
temperature: float = 0.1,
) -> None:
"""
This loss is used to train a SparseEncoder model using the GISTEmbed algorithm.
It takes a model and a guide model as input, and uses the guide model to guide the
in-batch negative sample selection. The cosine similarity is used to compute the loss
and the temperature parameter is used to scale the cosine similarities.
You can apply different false-negative filtering strategies to discard hard negatives that are too similar to
the positive. Two strategies are supported:
- "absolute": Discards negatives whose similarity score is greater than or equal to ``positive_score - margin``.
- "relative": Discards negatives whose similarity score is greater than or equal to ``positive_score * (1 - margin)``.
Args:
model: SparseEncoder model based on a `transformers` model.
guide: SparseEncoder model to guide the in-batch negative sample selection.
temperature: Temperature parameter to scale the cosine similarities. The default of 0.1 is adapted for sparse embeddings and may need further tuning.
margin_strategy: Strategy used for false negative filtering. One of {"absolute", "relative"}.
margin: The margin value for filtering negatives. Defaults to 0.0, together with the "absolute" strategy,
this only removes negatives that are more similar to the query than the positive is to the query.
References:
- For further details, see: https://arxiv.org/abs/2402.16829
Requirements:
1. (anchor, positive, negative) triplets
2. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseMultipleNegativesRankingLoss` is similar to this loss, but it does not use
a guide model to guide the in-batch negative sample selection. `SparseGISTEmbedLoss` yields
a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
# Initialize the SPLADE model
model = SparseEncoder("distilbert/distilbert-base-uncased")
guide = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SparseGISTEmbedLoss(model, guide=guide)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, guide=guide, temperature=temperature)
|
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
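# Optional follow-up (illustrative): pick the caption with the highest cosine
# similarity to the image embedding computed above.
captions = ["Two dogs in the snow", "A cat on a table", "A picture of London at night"]
best_idx = int(cos_scores.argmax())
print("Best matching caption:", captions[best_idx])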
|
from sentence_transformers import SentenceTransformer, util, models
from PIL import Image
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPProcessor, CLIPModel
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
from collections import Counter
import pytest
from datasets import Dataset
from sentence_transformers.sampler import GroupByLabelBatchSampler
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ..., 99],
"label_a": [0, 1, 0, 1, ..., 0, 1],
"label_b": [0, 1, 2, 3, 4, 0, ..., 4]
}
"""
data = {"data": list(range(100)), "label_a": [i % 2 for i in range(100)], "label_b": [i % 5 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_uneven_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": ["a"] * 51,
"label": [0] * 17 + [1] * 17 + [2] * 17,
}
"""
data = {"data": ["a"] * 51, "label": [0] * 17 + [1] * 17 + [2] * 17}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label_a", "label_b"]
)
batches = list(iter(sampler))
assert all(len(batch) == batch_size for batch in batches)
# Check if all labels within each batch are identical
# In this case, label_a has 50 0's and 50 1's, so with a batch size of 10 we expect each batch to
# have only 0's or only 1's.
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_a"] for idx in batch]
assert len(set(labels)) == 1, f"Batch {batch} does not have identical labels: {labels}"
def test_group_by_label_batch_sampler_label_b(dummy_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label_b"]
)
# drop_last=True, so each batch should be the same length and the last batch is dropped.
batches = list(iter(sampler))
assert all(
len(batch) == batch_size for batch in batches
), "Not all batches are the same size, while drop_last was True."
# Assert that we have the expected number of total samples in the batches.
assert sum(len(batch) for batch in batches) == 100 // batch_size * batch_size
# Since we have 20 occurrences each of label_b values 0, 1, 2, 3 and 4 and a batch_size of 8, we expect each batch
# to have either 4 or 8 samples with the same label. (The first two batches take 16 samples of the first label,
# leaving 4 for the third batch. Then 4 samples of the next label are added, leaving 16 for the next two batches, and so on.)
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_b"] for idx in batch]
counts = list(Counter(labels).values())
assert counts == [8] or counts == [4, 4]
def test_group_by_label_batch_sampler_uneven_dataset(dummy_uneven_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_uneven_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label"]
)
# With a batch_size of 8 and 17 samples per label, verify that every label in a batch occurs at least twice.
# We accept some tiny data loss (1 sample per label) due to the uneven number of samples per label.
batches = list(iter(sampler))
for batch in batches:
labels = [dummy_uneven_dataset[int(idx)]["label"] for idx in batch]
counts = list(Counter(labels).values())
assert all(count > 1 for count in counts)
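# Hedged usage sketch (not part of this test module): the sampler yields lists of
# dataset indices, as iterated above, so it can plausibly be plugged into a torch
# DataLoader via its `batch_sampler` argument, e.g.:
# from torch.utils.data import DataLoader
# loader = DataLoader(dummy_uneven_dataset, batch_sampler=sampler)
# first_batch = next(iter(loader))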
|
import pytest
from datasets import Dataset
from sentence_transformers.sampler import GroupByLabelBatchSampler
from collections import Counter
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ..., 99],
"label_a": [0, 1, 0, 1, ..., 0, 1],
"label_b": [0, 1, 2, 3, 4, 0, ..., 4]
}
"""
data = {"data": list(range(100)), "label_a": [i % 2 for i in range(100)], "label_b": [i % 5 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_uneven_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": ["a"] * 51,
"label": [0] * 17 + [1] * 17 + [2] * 17,
}
"""
data = {"data": ["a"] * 51, "label": [0] * 17 + [1] * 17 + [2] * 17}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label_a", "label_b"]
)
batches = list(iter(sampler))
assert all(len(batch) == batch_size for batch in batches)
# Check if all labels within each batch are identical
# In this case, label_a has 50 0's and 50 1's, so with a batch size of 10 we expect each batch to
# have only 0's or only 1's.
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_a"] for idx in batch]
assert len(set(labels)) == 1, f"Batch {batch} does not have identical labels: {labels}"
def test_group_by_label_batch_sampler_label_b(dummy_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label_b"]
)
# drop_last=True, so each batch should be the same length and the last batch is dropped.
batches = list(iter(sampler))
assert all(
len(batch) == batch_size for batch in batches
), "Not all batches are the same size, while drop_last was True."
# Assert that we have the expected number of total samples in the batches.
assert sum(len(batch) for batch in batches) == 100 // batch_size * batch_size
# Since we have 20 occurrences each of label_b values 0, 1, 2, 3 and 4 and a batch_size of 8, we expect each batch
# to have either 4 or 8 samples with the same label. (The first two batches take 16 samples of the first label,
# leaving 4 for the third batch. Then 4 samples of the next label are added, leaving 16 for the next two batches, and so on.)
for batch in batches:
labels = [dummy_dataset[int(idx)]["label_b"] for idx in batch]
counts = list(Counter(labels).values())
assert counts == [8] or counts == [4, 4]
def test_group_by_label_batch_sampler_uneven_dataset(dummy_uneven_dataset: Dataset) -> None:
batch_size = 8
sampler = GroupByLabelBatchSampler(
dataset=dummy_uneven_dataset, batch_size=batch_size, drop_last=False, valid_label_columns=["label"]
)
# With a batch_size of 8 and 17 samples per label, verify that every label in a batch occurs at least twice.
# We accept some tiny data loss (1 sample per label) due to the uneven number of samples per label.
batches = list(iter(sampler))
for batch in batches:
labels = [dummy_uneven_dataset[int(idx)]["label"] for idx in batch]
counts = list(Counter(labels).values())
assert all(count > 1 for count in counts)
|
from langchain_core.callbacks.base import BaseCallbackHandler, BaseCallbackManager
def test_remove_handler() -> None:
"""Test removing handler does not raise an error on removal.
An handler can be inheritable or not. This test checks that
removing a handler does not raise an error if the handler
is not inheritable.
"""
handler1 = BaseCallbackHandler()
handler2 = BaseCallbackHandler()
manager = BaseCallbackManager([handler1], inheritable_handlers=[handler2])
manager.remove_handler(handler1)
manager.remove_handler(handler2)
|
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.callbacks.manager import BaseCallbackManager
def test_remove_handler() -> None:
"""Test removing handler does not raise an error on removal.
An handler can be inheritable or not. This test checks that
removing a handler does not raise an error if the handler
is not inheritable.
"""
handler1 = BaseCallbackHandler()
handler2 = BaseCallbackHandler()
manager = BaseCallbackManager([handler1], inheritable_handlers=[handler2])
manager.remove_handler(handler1)
manager.remove_handler(handler2)
|
from collections.abc import Generator
import pytest
from langchain_core.vectorstores import VectorStore
from langchain_tests.integration_tests.vectorstores import VectorStoreIntegrationTests
from langchain_chroma import Chroma
class TestChromaStandard(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore[override]
"""Get an empty vectorstore for unit tests."""
store = Chroma(embedding_function=self.get_embeddings())
try:
yield store
finally:
store.delete_collection()
|
from collections.abc import Generator
import pytest
from langchain_core.vectorstores import VectorStore
from langchain_tests.integration_tests.vectorstores import VectorStoreIntegrationTests
from langchain_chroma import Chroma
class TestChromaStandard(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
"""Get an empty vectorstore for unit tests."""
store = Chroma(embedding_function=self.get_embeddings())
try:
yield store
finally:
store.delete_collection()
|
from typing import Any, Optional, Sequence, List, cast
from llama_index.core.llms import ChatMessage, ImageBlock, TextBlock
from llama_index.core.base.llms.types import ContentBlock
from llama_index.core.base.llms.generic_utils import image_node_to_image_block
from llama_index.core.schema import ImageDocument, ImageNode
def generate_gemini_multi_modal_chat_message(
prompt: str,
role: str,
image_documents: Optional[Sequence[ImageDocument]] = None,
**kwargs: Any,
) -> ChatMessage:
# if image_documents is empty, return text only chat message
if image_documents is None or len(image_documents) == 0:
return ChatMessage(role=role, content=prompt)
blocks: List[ContentBlock] = [TextBlock(text=prompt)]
if all(isinstance(doc, ImageNode) for doc in image_documents):
image_docs: List[ImageBlock] = [
image_node_to_image_block(doc) for doc in image_documents
]
else:
image_docs = cast(List[ImageBlock], image_documents)
blocks.extend(image_docs)
return ChatMessage(role=role, blocks=blocks)
|
from typing import Any, Optional, Sequence
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.schema import ImageDocument
def generate_gemini_multi_modal_chat_message(
prompt: str,
role: str,
image_documents: Optional[Sequence[ImageDocument]] = None,
**kwargs: Any,
) -> ChatMessage:
# if image_documents is empty, return text only chat message
if image_documents is None or len(image_documents) == 0:
return ChatMessage(role=role, content=prompt)
additional_kwargs = {
"images": image_documents,
}
return ChatMessage(role=role, content=prompt, additional_kwargs=additional_kwargs)
|
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5),
rpn_proposal=dict(nms_post=1000, max_per_img=300),
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5),
rpn_proposal=dict(nms_post=1000, max_per_img=300),
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
from mmengine.hooks import RuntimeInfoHook
from mmengine.logging import MessageHub
class TestRuntimeInfoHook(TestCase):
def test_before_run(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_run')
runner = Mock()
runner.epoch = 3
runner.iter = 30
runner.max_epochs = 4
runner.max_iters = 40
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_run(runner)
self.assertEqual(message_hub.get_info('epoch'), 3)
self.assertEqual(message_hub.get_info('iter'), 30)
self.assertEqual(message_hub.get_info('max_epochs'), 4)
self.assertEqual(message_hub.get_info('max_iters'), 40)
def test_before_train(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train')
runner = Mock()
runner.epoch = 7
runner.iter = 71
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train(runner)
self.assertEqual(message_hub.get_info('epoch'), 7)
self.assertEqual(message_hub.get_info('iter'), 71)
def test_before_train_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_epoch')
runner = Mock()
runner.epoch = 9
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_epoch(runner)
self.assertEqual(message_hub.get_info('epoch'), 9)
def test_before_train_iter(self):
# single optimizer
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
runner.optimizer.param_groups = [{'lr': 0.01}]
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(message_hub.get_scalar('train/lr').current(), 0.01)
# multiple optimizers
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
optimizer1 = Mock()
optimizer1.param_groups = [{'lr': 0.01}]
optimizer2 = Mock()
optimizer2.param_groups = [{'lr': 0.02}]
runner.optimizer = dict(key1=optimizer1, key2=optimizer2)
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(
message_hub.get_scalar('train/key1.lr').current(), 0.01)
self.assertEqual(
message_hub.get_scalar('train/key2.lr').current(), 0.02)
def test_after_train_iter(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_train_iter')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_train_iter(
runner,
batch_idx=2,
data_batch=None,
outputs={'log_vars': {
'loss_cls': 1.111
}})
self.assertEqual(
message_hub.get_scalar('train/loss_cls').current(), 1.111)
def test_after_val_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_val_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_val_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('val/acc').current(), 0.8)
def test_after_test_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_test_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_test_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('test/acc').current(), 0.8)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
from mmengine.hooks import RuntimeInfoHook
from mmengine.logging import MessageHub
class TestRuntimeInfoHook(TestCase):
def test_before_run(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_run')
runner = Mock()
runner.epoch = 3
runner.iter = 30
runner.max_epochs = 4
runner.max_iters = 40
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_run(runner)
self.assertEqual(message_hub.get_info('epoch'), 3)
self.assertEqual(message_hub.get_info('iter'), 30)
self.assertEqual(message_hub.get_info('max_epochs'), 4)
self.assertEqual(message_hub.get_info('max_iters'), 40)
def test_before_train(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train')
runner = Mock()
runner.epoch = 7
runner.iter = 71
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train(runner)
self.assertEqual(message_hub.get_info('epoch'), 7)
self.assertEqual(message_hub.get_info('iter'), 71)
def test_before_train_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_epoch')
runner = Mock()
runner.epoch = 9
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_epoch(runner)
self.assertEqual(message_hub.get_info('epoch'), 9)
def test_before_train_iter(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
runner.optimizer.param_groups = [{'lr': 0.01}]
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(message_hub.get_scalar('train/lr').current(), 0.01)
def test_after_train_iter(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_train_iter')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_train_iter(
runner,
batch_idx=2,
data_batch=None,
outputs={'log_vars': {
'loss_cls': 1.111
}})
self.assertEqual(
message_hub.get_scalar('train/loss_cls').current(), 1.111)
def test_after_val_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_val_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_val_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('val/acc').current(), 0.8)
def test_after_test_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_test_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_test_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('test/acc').current(), 0.8)
|
from torch.fx import Graph, GraphModule, Node
from torch.fx._compatibility import compatibility
from .matcher_utils import InternalMatch, SubgraphMatcher
__all__ = ["SubgraphMatcherWithNameNodeMap"]
def _split_to_graph_and_name_node_map(
gm: GraphModule,
) -> tuple[GraphModule, dict[str, Node]]:
from torch.fx.graph import _PyTreeInfo
from torch.utils._pytree import tree_flatten, tree_unflatten
name_node_map = {}
for n in gm.graph.nodes:
if n.op == "output":
assert gm._out_spec is not None
output = tree_unflatten(n.args[0], gm._out_spec)
assert isinstance(output, tuple), (
"Expecting the pattern graph to return a tuple"
)
assert len(output) >= 2, (
"Expecting the pattern graph to have at least two outputs"
)
*out, name_node_map = output
flattened, out_spec = tree_flatten(out)
assert isinstance(name_node_map, dict), (
"Expecting the input graph to have a dict output as the last element"
)
n.args = (flattened,)
orig_pytree_info = gm._graph._codegen.pytree_info # type: ignore[attr-defined]
gm._graph._codegen.pytree_info = _PyTreeInfo( # type: ignore[attr-defined]
orig_pytree_info.orig_args, orig_pytree_info.in_spec, out_spec
)
gm.recompile()
return gm, name_node_map
@compatibility(is_backward_compatible=False)
class SubgraphMatcherWithNameNodeMap(SubgraphMatcher):
"""Extends SubgraphMatcher to support querying the matched subgraph nodes through node name,
this requires pattern to have specific format (returning and additional dictionary at the output,
that has node name as key, and the node in the pattern graph as value, see Example for more details)
Difference with SubgraphMatcher is that it takes a `pattern_gm` GraphModule as input during
initialization since we need to modify the graph (which requires `recompile` the GraphModule)
Example::
def pattern(x, weight):
conv = F.conv2d(x, weight)
relu = F.relu(conv)
return relu, {"conv": conv, "relu": relu}
def target_graph(x, weight):
conv = F.conv2d(x, weight)
relu = F.relu(conv)
relu *= 2
return relu
pattern_gm = export_for_training(pattern, example_inputs).module()
target_gm = export_for_training(target_graph, example_inputs).module()
matcher = SubgraphMatcherWithNameNodeMap(pattern_gm)
matches = matcher.match(target_gm)
for match in matches:
match.name_node_map["conv"].meta["annotation"] = ...
"""
def __init__(
self,
pattern_gm: GraphModule,
match_output: bool = False,
match_placeholder: bool = False,
remove_overlapping_matches: bool = True,
ignore_literals: bool = False,
) -> None:
pattern_gm, name_node_map = _split_to_graph_and_name_node_map(pattern_gm)
self.name_node_map = name_node_map
super().__init__(
pattern_gm.graph,
match_output,
match_placeholder,
remove_overlapping_matches,
ignore_literals,
)
def match(self, graph: Graph, node_name_match: str = "") -> list[InternalMatch]:
"""The returned InternalMatch will have name_node_map populated with a map
from node name (str) to the target node, e.g.
{"conv": target_conv_ndoe, "relu": target_relu_node}
This requires that the pattern graph return an additional
output mapping node name to node, e.g. instead of:
```
def pattern(...):
...
return relu
```
we should do:
```
def pattern(...):
...
return relu, {"conv": conv, "relu": relu}
```
"""
internal_matches = super().match(graph, node_name_match)
for internal_match in internal_matches:
for k, n in self.name_node_map.items():
internal_match.name_node_map[k] = internal_match.nodes_map[n]
return internal_matches
|
from torch.fx import Graph, GraphModule, Node
from torch.fx._compatibility import compatibility
from .matcher_utils import InternalMatch, SubgraphMatcher
__all__ = ["SubgraphMatcherWithNameNodeMap"]
def _split_to_graph_and_name_node_map(
gm: GraphModule,
) -> tuple[GraphModule, dict[str, Node]]:
from torch.fx.graph import _PyTreeInfo
from torch.utils._pytree import tree_flatten, tree_unflatten
name_node_map = {}
for n in gm.graph.nodes:
if n.op == "output":
assert gm._out_spec is not None
output = tree_unflatten(n.args[0], gm._out_spec)
assert isinstance(output, tuple), (
"Expecting the pattern graph to return a tuple"
)
assert len(output) >= 2, (
"Expecting the pattern graph to have at least two outputs"
)
*out, name_node_map = output
flattened, out_spec = tree_flatten(out)
assert isinstance(name_node_map, dict), (
"Expecting the input graph to have a dict output as the last element"
)
n.args = (flattened,)
orig_pytree_info = gm._graph._codegen.pytree_info # type: ignore[attr-defined]
gm._graph._codegen.pytree_info = _PyTreeInfo( # type: ignore[attr-defined]
orig_pytree_info.orig_args, orig_pytree_info.in_spec, out_spec
)
gm.recompile()
return gm, name_node_map
@compatibility(is_backward_compatible=False)
class SubgraphMatcherWithNameNodeMap(SubgraphMatcher):
"""Extends SubgraphMatcher to support querying the matched subgraph nodes through node name,
this requires pattern to have specific format (returning and additional dictionary at the output,
that has node name as key, and the node in the pattern graph as value, see Example for more details)
Difference with SubgraphMatcher is that it takes a `pattern_gm` GraphModule as input during
initialization since we need to modify the graph (which requires `recompile` the GraphModule)
Example::
def pattern(x, weight):
conv = F.conv2d(x, weight)
relu = F.relu(conv)
return relu, {"conv": conv, "relu": relu}
def target_graph(x, weight):
conv = F.conv2d(x, weight)
relu = F.relu(conv)
relu *= 2
return relu
pattern_gm = export_for_training(pattern, example_inputs).module()
target_gm = export_for_training(target_graph, example_inputs).module()
matcher = SubgraphMatcherWithNameNodeMap(pattern_gm)
matches = matcher.match(target_gm)
for match in matches:
match.name_node_map["conv"].meta["annotation"] = ...
"""
def __init__(
self,
pattern_gm: GraphModule,
match_output: bool = False,
match_placeholder: bool = False,
remove_overlapping_matches: bool = True,
ignore_literals: bool = False,
) -> None:
pattern_gm, name_node_map = _split_to_graph_and_name_node_map(pattern_gm)
self.name_node_map = name_node_map
super().__init__(
pattern_gm.graph,
match_output,
match_placeholder,
remove_overlapping_matches,
ignore_literals,
)
def match(self, graph: Graph) -> list[InternalMatch]:
"""The returned InternalMatch will have name_node_map populated with a map
from node name (str) to the target node, e.g.
{"conv": target_conv_ndoe, "relu": target_relu_node}
This requires that the pattern graph return an additional
output mapping node name to node, e.g. instead of:
```
def pattern(...):
...
return relu
```
we should do:
```
def pattern(...):
...
return relu, {"conv": conv, "relu": relu}
```
"""
internal_matches = super().match(graph)
for internal_match in internal_matches:
for k, n in self.name_node_map.items():
internal_match.name_node_map[k] = internal_match.nodes_map[n]
return internal_matches
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, rgb_to_grayscale, to_grayscale, to_tensor # usort: skip
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_color_space_image_tensor,
convert_color_space_image_pil,
convert_color_space_video,
convert_color_space,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, rgb_to_grayscale, to_grayscale, to_tensor # usort: skip
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.utils import summary_utils
class SummaryUtilsTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters([("adam",), (None,)])
@pytest.mark.requires_trainable_backend
def test_print_model_summary(self, optimizer):
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
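# Dense(3) on 2 input features has 2 * 3 + 3 = 9 trainable parameters, matching the counts asserted below.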
model = models.Model(inputs, outputs)
model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])
if optimizer:
# Trigger the optimizer weights creation
model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3]))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
try:
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
if optimizer:
self.assertIn("Total params: 29", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertIn("Optimizer params: 20", summary_content)
else:
self.assertIn("Total params: 9", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertNotIn("Optimizer params", summary_content)
except ImportError:
pass
def test_print_model_summary_custom_build(self):
class MyModel(models.Model):
def __init__(self):
super().__init__()
self.dense1 = layers.Dense(4, activation="relu")
self.dense2 = layers.Dense(2, activation="softmax")
self.unbuilt_dense = layers.Dense(1)
def build(self, input_shape):
self.dense1.build(input_shape)
input_shape = self.dense1.compute_output_shape(input_shape)
self.dense2.build(input_shape)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
model.build((None, 2))
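# Parameter count: dense1 has 2 * 4 + 4 = 12, dense2 has 4 * 2 + 2 = 10, and unbuilt_dense contributes nothing yet -> 22 total.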
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
self.assertIn("(None, 4)", summary_content) # dense1
self.assertIn("(None, 2)", summary_content) # dense2
self.assertIn("?", summary_content) # unbuilt_dense
self.assertIn("Total params: 22", summary_content)
self.assertIn("Trainable params: 22", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
def test_print_model_summary_op_as_layer(self):
inputs = layers.Input((2,))
x = layers.Dense(4)(inputs)
outputs = ops.mean(x)
model = models.Model(inputs, outputs)
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(
model, print_fn=print_to_variable, show_trainable=True
)
summary_content = "\n".join(summary_content)
self.assertIn("(None, 4)", summary_content) # dense
self.assertIn("Y", summary_content) # dense
self.assertIn("()", summary_content) # mean
self.assertIn("-", summary_content) # mean
self.assertIn("Total params: 12", summary_content)
self.assertIn("Trainable params: 12", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.utils import summary_utils
class SummaryUtilsTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters([("adam",), (None,)])
@pytest.mark.requires_trainable_backend
def test_print_model_summary(self, optimizer):
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
model = models.Model(inputs, outputs)
model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])
if optimizer:
# Trigger the optimizer weights creation
model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3]))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
try:
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
if optimizer:
self.assertIn("Total params: 29", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertIn("Optimizer params: 20", summary_content)
else:
self.assertIn("Total params: 9", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertNotIn("Optimizer params", summary_content)
except ImportError:
pass
def test_print_model_summary_custom_build(self):
class MyModel(models.Model):
def __init__(self):
super().__init__()
self.dense1 = layers.Dense(4, activation="relu")
self.dense2 = layers.Dense(2, activation="softmax")
self.unbuilt_dense = layers.Dense(1)
def build(self, input_shape):
self.dense1.build(input_shape)
input_shape = self.dense1.compute_output_shape(input_shape)
self.dense2.build(input_shape)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
model.build((None, 2))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
self.assertIn("(None, 4)", summary_content) # dense1
self.assertIn("(None, 2)", summary_content) # dense2
self.assertIn("?", summary_content) # unbuilt_dense
self.assertIn("Total params: 22", summary_content)
self.assertIn("Trainable params: 22", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
|
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Value
from datasets.packaged_modules.json.json import Json
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [1, 10], "col_2": [2, 20]}
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
(
"jsonl_file",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts_field",
{
"field": "field3",
"features": Features(
{"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")}
),
},
),
],
)
def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [1, 10], "col_2": [2, 20], "missing_col": [None, None]}
|
import textwrap
import pyarrow as pa
import pytest
from datasets.packaged_modules.json.json import Json
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [1, 10], "col_2": [2, 20]}
|
import pytest
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.mrkl.output_parser import (
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
MRKLOutputParser,
)
mrkl_output_parser = MRKLOutputParser()
def test_valid_action_and_action_input_parse() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar"""
agent_action: AgentAction = mrkl_output_parser.parse(llm_output) # type: ignore[assignment]
assert agent_action.tool == "foo"
assert agent_action.tool_input == "bar"
def test_valid_final_answer_parse() -> None:
llm_output = """Final Answer: The best pizza to eat is margaritta """
agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output) # type: ignore[assignment]
assert (
agent_finish.return_values.get("output")
== "The best pizza to eat is margaritta"
)
def test_missing_action() -> None:
llm_output = """I can use the `foo` tool to achieve the goal."""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
exception_info.value.observation == MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE
)
def test_missing_action_input() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
exception_info.value.observation
== MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE
)
def test_final_answer_before_parsable_action() -> None:
llm_output = """Final Answer: The best pizza to eat is margaritta
Action: foo
Action Input: bar
"""
agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output) # type: ignore[assignment]
assert (
agent_finish.return_values.get("output")
== "The best pizza to eat is margaritta"
)
def test_final_answer_after_parsable_action() -> None:
llm_output = """
Observation: I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar
Final Answer: The best pizza to eat is margaritta
"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
"Parsing LLM output produced both a final answer and a parse-able action"
in exception_info.value.args[0]
)
|
import pytest
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.mrkl.output_parser import (
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
MRKLOutputParser,
)
mrkl_output_parser = MRKLOutputParser()
def test_valid_action_and_action_input_parse() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar"""
agent_action: AgentAction = mrkl_output_parser.parse(llm_output) # type: ignore[assignment]
assert agent_action.tool == "foo"
assert agent_action.tool_input == "bar"
def test_valid_final_answer_parse() -> None:
llm_output = """Final Answer: The best pizza to eat is margaritta """
agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output) # type: ignore[assignment]
assert (
agent_finish.return_values.get("output")
== "The best pizza to eat is margaritta"
)
def test_missing_action() -> None:
llm_output = """I can use the `foo` tool to achieve the goal."""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
exception_info.value.observation == MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE
)
def test_missing_action_input() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
exception_info.value.observation
== MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE
)
def test_final_answer_before_parsable_action() -> None:
llm_output = """Final Answer: The best pizza to eat is margaritta
Action: foo
Action Input: bar
"""
agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output) # type: ignore[assignment]
assert (
agent_finish.return_values.get("output")
== "The best pizza to eat is margaritta"
)
def test_final_answer_after_parsable_action() -> None:
llm_output = """
Observation: I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar
Final Answer: The best pizza to eat is margaritta
"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
"Parsing LLM output produced both a final answer and a parse-able action"
in exception_info.value.args[0]
)
|
import pathlib
from argparse import ArgumentParser
from lightning import ConformerRNNTModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DDPPlugin
from transforms import get_data_module
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
)
model = ConformerRNNTModule(str(args.sp_model_path))
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
|
import pathlib
from argparse import ArgumentParser
from lightning import ConformerRNNTModule, get_data_module
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DDPPlugin
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
)
model = ConformerRNNTModule(str(args.sp_model_path))
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'milvus': [
'pymilvus~=2.1.0',
],
'benchmark': [
'pandas',
'matplotlib',
'seaborn',
'h5py',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'pymilvus==2.1.3',
'jina',
'pytest-mock',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='[email protected]',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'milvus': [
'pymilvus>=2.1.0',
],
'benchmark': [
'pandas',
'matplotlib',
'seaborn',
'h5py',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'pymilvus>=2.1.0',
'jina',
'pytest-mock',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
"""Google PaLM embeddings file."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativeai as palm
@deprecated.deprecated(
reason=(
"Should use `llama-index-embeddings-google-genai` instead, using Google's latest unified SDK. "
"See: https://docs.llamaindex.ai/en/stable/examples/embeddings/google_genai/"
)
)
class GooglePaLMEmbedding(BaseEmbedding):
"""
Class for Google PaLM embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-gecko-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
def __init__(
self,
model_name: str = "models/embedding-gecko-001",
api_key: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
palm.configure(api_key=api_key)
self._model = palm
@classmethod
def class_name(cls) -> str:
return "PaLMEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.generate_embeddings(model=self.model_name, text=query)[
"embedding"
]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self._model.aget_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.generate_embeddings(model=self.model_name, text=text)[
"embedding"
]
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self._model._get_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._model.generate_embeddings(model=self.model_name, text=texts)[
"embedding"
]
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return await self._model._get_embeddings(texts)
|
"""Google PaLM embeddings file."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativeai as palm
@deprecated.deprecated(
reason=(
"Should use `llama-index-embeddings-google-genai` instead, using Google's latest unified SDK. "
"See: https://docs.llamaindex.ai/en/stable/examples/embeddings/google_genai/"
)
)
class GooglePaLMEmbedding(BaseEmbedding):
"""Class for Google PaLM embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-gecko-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
def __init__(
self,
model_name: str = "models/embedding-gecko-001",
api_key: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
palm.configure(api_key=api_key)
self._model = palm
@classmethod
def class_name(cls) -> str:
return "PaLMEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.generate_embeddings(model=self.model_name, text=query)[
"embedding"
]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self._model.aget_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.generate_embeddings(model=self.model_name, text=text)[
"embedding"
]
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self._model._get_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._model.generate_embeddings(model=self.model_name, text=texts)[
"embedding"
]
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return await self._model._get_embeddings(texts)
|
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.retry_policy import ConstantDelayRetryPolicy
from llama_index.core.workflow.workflow import Workflow
@pytest.mark.asyncio
async def test_retry_e2e():
class CountEvent(Event):
"""Empty event to signal a step to increment a counter in the Context."""
class DummyWorkflow(Workflow):
# Set a small delay to avoid impacting the CI speed too much
@step(retry_policy=ConstantDelayRetryPolicy(delay=0.2))
async def flaky_step(self, ctx: Context, ev: StartEvent) -> StopEvent:
count = await ctx.get("counter", default=0)
ctx.send_event(CountEvent())
if count < 3:
raise ValueError("Something bad happened!")
return StopEvent(result="All good!")
@step
async def counter(self, ctx: Context, ev: CountEvent) -> None:
count = await ctx.get("counter", default=0)
await ctx.set("counter", count + 1)
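    # Validation is disabled, presumably because flaky_step emits CountEvent via ctx.send_event
    # without declaring it in its return annotation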
workflow = DummyWorkflow(disable_validation=True)
assert await workflow.run() == "All good!"
def test_ConstantDelayRetryPolicy_init():
p = ConstantDelayRetryPolicy()
assert p.maximum_attempts == 3
assert p.delay == 5
def test_ConstantDelayRetryPolicy_next():
delay = 4.2
p = ConstantDelayRetryPolicy(maximum_attempts=5, delay=delay)
assert p.next(elapsed_time=0.0, attempts=4, error=Exception()) == delay
assert p.next(elapsed_time=0.0, attempts=5, error=Exception()) is None
# This should never happen but ensure the code is resilient
assert p.next(elapsed_time=0.0, attempts=999, error=Exception()) is None
|
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.retry_policy import ConstantDelayRetryPolicy
from llama_index.core.workflow.workflow import Workflow
@pytest.mark.asyncio()
async def test_retry_e2e():
class CountEvent(Event):
"""Empty event to signal a step to increment a counter in the Context."""
class DummyWorkflow(Workflow):
# Set a small delay to avoid impacting the CI speed too much
@step(retry_policy=ConstantDelayRetryPolicy(delay=0.2))
async def flaky_step(self, ctx: Context, ev: StartEvent) -> StopEvent:
count = await ctx.get("counter", default=0)
ctx.send_event(CountEvent())
if count < 3:
raise ValueError("Something bad happened!")
return StopEvent(result="All good!")
@step
async def counter(self, ctx: Context, ev: CountEvent) -> None:
count = await ctx.get("counter", default=0)
await ctx.set("counter", count + 1)
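    # Validation is disabled, presumably because flaky_step emits CountEvent via ctx.send_event
    # without declaring it in its return annotation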
workflow = DummyWorkflow(disable_validation=True)
assert await workflow.run() == "All good!"
def test_ConstantDelayRetryPolicy_init():
p = ConstantDelayRetryPolicy()
assert p.maximum_attempts == 3
assert p.delay == 5
def test_ConstantDelayRetryPolicy_next():
delay = 4.2
p = ConstantDelayRetryPolicy(maximum_attempts=5, delay=delay)
assert p.next(elapsed_time=0.0, attempts=4, error=Exception()) == delay
assert p.next(elapsed_time=0.0, attempts=5, error=Exception()) is None
# This should never happen but ensure the code is resilient
assert p.next(elapsed_time=0.0, attempts=999, error=Exception()) is None
|
#!/usr/bin/env python3
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_TEDLIUM3
logger = logging.getLogger(__name__)
def compute_word_level_distance(seq1, seq2):
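    # Word-level edit distance between reference and hypothesis; summing these over utterances and
    # dividing by the total number of reference words yields the WER reported below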
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def _eval_subset(tedlium_path, subset, feature_extractor, decoder, token_processor, use_cuda):
total_edit_distance = 0
total_length = 0
if subset == "dev":
dataset = torchaudio.datasets.TEDLIUM(tedlium_path, release="release3", subset="dev")
elif subset == "test":
dataset = torchaudio.datasets.TEDLIUM(tedlium_path, release="release3", subset="test")
with torch.no_grad():
for idx in range(len(dataset)):
sample = dataset[idx]
waveform = sample[0].squeeze()
if use_cuda:
waveform = waveform.to(device="cuda")
actual = sample[2].replace("\n", "")
if actual == "ignore_time_segment_in_scoring":
continue
features, length = feature_extractor(waveform)
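            # The third argument (20) is the beam width used by the RNN-T beam search decoder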
hypos = decoder(features, length, 20)
hypothesis = hypos[0]
hypothesis = token_processor(hypothesis[0])
total_edit_distance += compute_word_level_distance(actual, hypothesis)
total_length += len(actual.split())
if idx % 100 == 0:
print(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
print(f"Final WER for {subset} set: {total_edit_distance / total_length}")
def run_eval_pipeline(args):
decoder = EMFORMER_RNNT_BASE_TEDLIUM3.get_decoder()
token_processor = EMFORMER_RNNT_BASE_TEDLIUM3.get_token_processor()
feature_extractor = EMFORMER_RNNT_BASE_TEDLIUM3.get_feature_extractor()
if args.use_cuda:
feature_extractor = feature_extractor.to(device="cuda").eval()
decoder = decoder.to(device="cuda")
_eval_subset(args.tedlium_path, "dev", feature_extractor, decoder, token_processor, args.use_cuda)
_eval_subset(args.tedlium_path, "test", feature_extractor, decoder, token_processor, args.use_cuda)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter,
)
parser.add_argument(
"--tedlium-path",
type=pathlib.Path,
help="Path to TED-LIUM release 3 dataset.",
)
parser.add_argument(
"--use-cuda",
action="store_true",
default=False,
help="Run using CUDA.",
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = _parse_args()
_init_logger(args.debug)
run_eval_pipeline(args)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_TEDLIUM3
logger = logging.getLogger(__name__)
def compute_word_level_distance(seq1, seq2):
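    # Word-level edit distance between reference and hypothesis; summing these over utterances and
    # dividing by the total number of reference words yields the WER reported below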
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def _eval_subset(tedlium_path, subset, feature_extractor, decoder, token_processor, use_cuda):
total_edit_distance = 0
total_length = 0
if subset == "dev":
dataset = torchaudio.datasets.TEDLIUM(tedlium_path, release="release3", subset="dev")
elif subset == "test":
dataset = torchaudio.datasets.TEDLIUM(tedlium_path, release="release3", subset="test")
with torch.no_grad():
for idx in range(len(dataset)):
sample = dataset[idx]
waveform = sample[0].squeeze()
if use_cuda:
waveform = waveform.to(device="cuda")
actual = sample[2].replace("\n", "")
if actual == "ignore_time_segment_in_scoring":
continue
features, length = feature_extractor(waveform)
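            # The third argument (20) is the beam width used by the RNN-T beam search decoder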
hypos = decoder(features, length, 20)
hypothesis = hypos[0]
hypothesis = token_processor(hypothesis.tokens)
total_edit_distance += compute_word_level_distance(actual, hypothesis)
total_length += len(actual.split())
if idx % 100 == 0:
print(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
print(f"Final WER for {subset} set: {total_edit_distance / total_length}")
def run_eval_pipeline(args):
decoder = EMFORMER_RNNT_BASE_TEDLIUM3.get_decoder()
token_processor = EMFORMER_RNNT_BASE_TEDLIUM3.get_token_processor()
feature_extractor = EMFORMER_RNNT_BASE_TEDLIUM3.get_feature_extractor()
if args.use_cuda:
feature_extractor = feature_extractor.to(device="cuda").eval()
decoder = decoder.to(device="cuda")
_eval_subset(args.tedlium_path, "dev", feature_extractor, decoder, token_processor, args.use_cuda)
_eval_subset(args.tedlium_path, "test", feature_extractor, decoder, token_processor, args.use_cuda)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter,
)
parser.add_argument(
"--tedlium-path",
type=pathlib.Path,
help="Path to TED-LIUM release 3 dataset.",
)
parser.add_argument(
"--use-cuda",
action="store_true",
default=False,
help="Run using CUDA.",
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = _parse_args()
_init_logger(args.debug)
run_eval_pipeline(args)
if __name__ == "__main__":
cli_main()
|
"""AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT_SECONDS = 900
REQUEST_ORIGIN = "llamaindex"
class AgentQLWebReader(BasePydanticReader):
"""
    Scrape a URL, with or without an AgentQL query, and return documents in JSON format.
Args:
api_key (str): The AgentQL API key, get one at https://dev.agentql.com
params (dict): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
"""
api_key: str
params: Optional[dict]
def __init__(
self,
api_key: str,
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, params=params)
def load_data(
self, url: str, query: Optional[str] = None, prompt: Optional[str] = None
) -> List[Document]:
"""
        Load data from the given URL.
Args:
url (str): URL to scrape or crawl.
query (Optional[str]): AgentQL query used to specify the scraped data.
prompt (Optional[str]): Natural language description of the data you want to scrape.
Either query or prompt must be provided.
params (Optional[dict]): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
Returns:
List[Document]: List of documents.
"""
payload = {"url": url, "query": query, "prompt": prompt, "params": self.params}
headers = {
"X-API-Key": f"{self.api_key}",
"Content-Type": "application/json",
"X-TF-Request-Origin": REQUEST_ORIGIN,
}
try:
response = httpx.post(
QUERY_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=API_TIMEOUT_SECONDS,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
response = e.response
if response.status_code in [401, 403]:
raise ValueError(
"Please, provide a valid API Key. You can create one at https://dev.agentql.com."
) from e
else:
try:
error_json = response.json()
msg = (
error_json["error_info"]
if "error_info" in error_json
else error_json["detail"]
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
else:
json = response.json()
return [Document(text=str(json["data"]), metadata=json["metadata"])]
|
"""AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT_SECONDS = 900
REQUEST_ORIGIN = "llamaindex"
class AgentQLWebReader(BasePydanticReader):
"""
    Scrape a URL, with or without an AgentQL query, and return documents in JSON format.
Args:
api_key (str): The AgentQL API key, get one at https://dev.agentql.com
params (dict): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
"""
api_key: str
params: Optional[dict]
def __init__(
self,
api_key: str,
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, params=params)
def load_data(
self, url: str, query: Optional[str] = None, prompt: Optional[str] = None
) -> List[Document]:
"""
        Load data from the given URL.
Args:
url (str): URL to scrape or crawl.
query (Optional[str]): AgentQL query used to specify the scraped data.
prompt (Optional[str]): Natural language description of the data you want to scrape.
Either query or prompt must be provided.
params (Optional[dict]): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
Returns:
List[Document]: List of documents.
"""
payload = {"url": url, "query": query, "prompt": prompt, "params": self.params}
headers = {
"X-API-Key": f"{self.api_key}",
"Content-Type": "application/json",
"X-TF-Request-Origin": REQUEST_ORIGIN,
}
try:
response = httpx.post(
QUERY_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=API_TIMEOUT_SECONDS,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
response = e.response
if response.status_code in [401, 403]:
raise ValueError(
"Please, provide a valid API Key. You can create one at https://dev.agentql.com."
) from e
else:
try:
error_json = response.json()
msg = (
error_json["error_info"]
if "error_info" in error_json
else error_json["detail"]
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
else:
json = response.json()
return [Document(text=str(json["data"]), metadata=json["metadata"])]
|
import numpy as np
import pytest
from docarray import DocumentArray
from docarray.document.generators import from_ndarray
from jina import Client, Flow
from jina.excepts import BadClientCallback
def validate(x):
raise NotImplementedError
@pytest.mark.skip(
reason='something wrong with parametrize in the following, setting either False or True work, but combining them does not. see discussion in https://jinaai.slack.com/archives/C018F60RBL5/p1613984424012700?thread_ts=1613954151.005100&cid=C018F60RBL5'
)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_error(protocol):
# In this particular test, when you write two tests in a row, you are testing the following case:
#
    # You are testing an exception in the client's callback, not an error in the client's request generator
    # 1. The exception breaks the `async for req in stub.Call(req_iter)` on the client
    # 2. The server probably still has something held in the stream
    # 3. Restart the client, keep the server untouched.
    # 4. Now the server gets stuck (because it considers the last connection not yet ended)
def validate(x):
raise NotImplementedError
with Flow(protocol=protocol).add() as f:
t = 0
try:
f.index(
from_ndarray(np.random.random([5, 4])),
on_done=validate,
continue_on_error=False,
)
except BadClientCallback:
# bad client callback will break the `async for req in stub.Call(req_iter)`
t = 1
# now query the gateway again, make sure gateway's channel is still usable
f.index(
from_ndarray(np.random.random([5, 4])),
on_done=validate,
continue_on_error=True,
)
assert t == 1
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_error_call(protocol):
with pytest.raises(ConnectionError):
Client(host='0.0.0.0', protocol=protocol, port=12345).post(
'/blah',
inputs=DocumentArray.empty(10),
)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_error_raise_exception(protocol):
with pytest.raises(ConnectionError):
Client(host='0.0.0.0', protocol=protocol, port=12345).post(
'/blah',
inputs=DocumentArray.empty(10),
)
|
from typing import Optional
import aiohttp
import numpy as np
import pytest
from docarray import DocumentArray
from docarray.document.generators import from_ndarray
from jina import Client, Flow
from jina.excepts import BadClientCallback
def validate(x):
raise NotImplementedError
@pytest.mark.skip(
reason='something wrong with parametrize in the following, setting either False or True work, but combining them does not. see discussion in https://jinaai.slack.com/archives/C018F60RBL5/p1613984424012700?thread_ts=1613954151.005100&cid=C018F60RBL5'
)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_error(protocol):
# In this particular test, when you write two tests in a row, you are testing the following case:
#
    # You are testing an exception in the client's callback, not an error in the client's request generator
    # 1. The exception breaks the `async for req in stub.Call(req_iter)` on the client
    # 2. The server probably still has something held in the stream
    # 3. Restart the client, keep the server untouched.
    # 4. Now the server gets stuck (because it considers the last connection not yet ended)
def validate(x):
raise NotImplementedError
with Flow(protocol=protocol).add() as f:
t = 0
try:
f.index(
from_ndarray(np.random.random([5, 4])),
on_done=validate,
continue_on_error=False,
)
except BadClientCallback:
# bad client callback will break the `async for req in stub.Call(req_iter)`
t = 1
# now query the gateway again, make sure gateway's channel is still usable
f.index(
from_ndarray(np.random.random([5, 4])),
on_done=validate,
continue_on_error=True,
)
assert t == 1
@pytest.mark.parametrize(
'protocol,exception',
[
('websocket', aiohttp.ClientError),
('grpc', ConnectionError),
('http', aiohttp.ClientError),
],
)
def test_client_on_error_call(protocol, exception):
with pytest.raises(exception):
Client(host='0.0.0.0', protocol=protocol, port=12345).post(
'/blah',
inputs=DocumentArray.empty(10),
)
@pytest.mark.parametrize(
'protocol,exception',
[
('websocket', aiohttp.client_exceptions.ClientConnectorError),
('grpc', ConnectionError),
('http', aiohttp.client_exceptions.ClientConnectorError),
],
)
def test_client_on_error_raise_exception(protocol, exception):
with pytest.raises(exception):
Client(host='0.0.0.0', protocol=protocol, port=12345).post(
'/blah',
inputs=DocumentArray.empty(10),
)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
# dataset settings
dataset_type = 'Objects365V1Dataset'
data_root = 'data/Objects365/Obj365_v1/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_train.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_val.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/objects365_val.json',
metric='bbox',
sort_categories=True,
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'Objects365V1Dataset'
data_root = 'data/Objects365/Obj365_v1/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_train.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_val.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/objects365_val.json',
metric='bbox',
sort_categories=True,
format_only=False)
test_evaluator = val_evaluator
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import TripletEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTripletEvaluator(TripletEvaluator):
def __init__(
self,
anchors: list[str],
positives: list[str],
negatives: list[str],
main_similarity_function: str | SimilarityFunction | None = None,
margin: float | dict[str, float] | None = None,
name: str = "",
batch_size: int = 16,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
main_distance_function: str | SimilarityFunction | None = "deprecated",
):
return super().__init__(
anchors=anchors,
positives=positives,
negatives=negatives,
main_similarity_function=main_similarity_function,
margin=margin,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
main_distance_function=main_distance_function,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import TripletEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTripletEvaluator(TripletEvaluator):
def __init__(
self,
anchors: list[str],
positives: list[str],
negatives: list[str],
main_similarity_function: str | SimilarityFunction | None = None,
margin: float | dict[str, float] | None = None,
name: str = "",
batch_size: int = 16,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
main_distance_function: str | SimilarityFunction | None = "deprecated",
):
return super().__init__(
anchors=anchors,
positives=positives,
negatives=negatives,
main_similarity_function=main_similarity_function,
margin=margin,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
main_distance_function=main_distance_function,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = MODELS.build(segm_head)
self.mask_head = MODELS.build(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
feat = self.extract_feat(img)
bbox_outs = self.bbox_head(feat)
prototypes = self.mask_head.forward_dummy(feat[0])
return (bbox_outs, prototypes)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_head
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = build_head(segm_head)
self.mask_head = build_head(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
feat = self.extract_feat(img)
bbox_outs = self.bbox_head(feat)
prototypes = self.mask_head.forward_dummy(feat[0])
return (bbox_outs, prototypes)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
|
import time
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 4. Choose a target precision for the corpus embeddings
corpus_precision = "binary"
# Valid options are: "float32", "uint8", "int8", "ubinary", and "binary"
# But usearch only supports "float32", "int8", and "binary"
# 5. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
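# A sketch of that alternative (assuming a sentence-transformers version whose `encode` accepts a `precision` argument):
# corpus_embeddings = model.encode(corpus, normalize_embeddings=True, precision=corpus_precision)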
# Initially we don't have a usearch index yet; semantic_search_usearch will create one for us on the first call
corpus_index = None
while True:
# 7. Encode the queries using the full precision
start_time = time.time()
query_embeddings = model.encode(queries, normalize_embeddings=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 8. Perform semantic search using usearch
results, search_time, corpus_index = semantic_search_usearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=4,
exact=True,
output_index=True,
)
# This is a helper function to showcase how usearch can be used with quantized embeddings.
# You must either provide the `corpus_embeddings` or the `corpus_index` usearch index, but not both.
# In the first call we'll provide the `corpus_embeddings` and get the `corpus_index` back, which
# we'll use in the next call. In practice, the index is stored in RAM or saved to disk, and not
# recalculated for every query.
# This function will 1) quantize the query embeddings to the same precision as the corpus embeddings,
# 2) perform the semantic search using usearch, 3) rescore the results using the full precision embeddings,
# and 4) return the results and the search time (and perhaps the usearch index).
# `corpus_precision` must be the same as the precision used to quantize the corpus embeddings.
# It is used to convert the query embeddings to the same precision as the corpus embeddings.
# `top_k` determines how many results are returned for each query.
# `rescore_multiplier` is a parameter for the rescoring step. Rather than searching for only the top_k results,
# we search for top_k * rescore_multiplier results, rescore them with the full precision embeddings, and keep the best top_k.
# So, higher values of rescore_multiplier will give better results, but will be slower.
# `calibration_embeddings` is a set of embeddings used to calibrate the quantization of the query embeddings.
# This is important only if you are using uint8 or int8 precision. In practice, this is used to calculate
# the minimum and maximum values of each of the embedding dimensions, which are then used to determine the
# quantization thresholds.
# `rescore` determines whether to rescore the results using the full precision embeddings; if it is False and the
# corpus is quantized, the results will be very poor. `exact` determines whether to use the exact search
# or the approximate search method in usearch.
# 9. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import time
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 4. Choose a target precision for the corpus embeddings
corpus_precision = "binary"
# Valid options are: "float32", "uint8", "int8", "ubinary", and "binary"
# But usearch only supports "float32", "int8", and "binary"
# 5. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision.
# 6. Initially, we don't have a usearch index yet; semantic_search_usearch will create it on the first call
corpus_index = None
while True:
# 7. Encode the queries using the full precision
start_time = time.time()
query_embeddings = model.encode(queries, normalize_embeddings=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 8. Perform semantic search using usearch
results, search_time, corpus_index = semantic_search_usearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=4,
exact=True,
output_index=True,
)
# This is a helper function to showcase how usearch can be used with quantized embeddings.
# You must either provide the `corpus_embeddings` or the `corpus_index` usearch index, but not both.
# In the first call we'll provide the `corpus_embeddings` and get the `corpus_index` back, which
# we'll use in the next call. In practice, the index is stored in RAM or saved to disk, and not
# recalculated for every query.
# This function will 1) quantize the query embeddings to the same precision as the corpus embeddings,
# 2) perform the semantic search using usearch, 3) rescore the results using the full precision embeddings,
# and 4) return the results and the search time (and perhaps the usearch index).
# `corpus_precision` must be the same as the precision used to quantize the corpus embeddings.
# It is used to convert the query embeddings to the same precision as the corpus embeddings.
# `top_k` determines how many results are returned for each query.
# `rescore_multiplier` is a parameter for the rescoring step. Rather than searching for only the top_k results,
# we search for top_k * rescore_multiplier results, rescore them with the full precision embeddings, and keep the best top_k.
# So, higher values of rescore_multiplier will give better results, but will be slower.
# `calibration_embeddings` is a set of embeddings used to calibrate the quantization of the query embeddings.
# This is important only if you are using uint8 or int8 precision. In practice, this is used to calculate
# the minimum and maximum values of each of the embedding dimensions, which are then used to determine the
# quantization thresholds.
# `rescore` determines whether to rescore the results using the full precision embeddings; if it is False and the
# corpus is quantized, the results will be very poor. `exact` determines whether to use the exact search
# or the approximate search method in usearch.
# 9. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial001 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert '"syntaxHighlight": false' in response.text, (
"syntaxHighlight should be included and converted to JSON"
)
assert '"dom_id": "#swagger-ui"' in response.text, (
"default configs should be preserved"
)
assert "presets: [" in response.text, "default configs should be preserved"
assert "SwaggerUIBundle.presets.apis," in response.text, (
"default configs should be preserved"
)
assert "SwaggerUIBundle.SwaggerUIStandalonePreset" in response.text, (
"default configs should be preserved"
)
assert '"layout": "BaseLayout",' in response.text, (
"default configs should be preserved"
)
assert '"deepLinking": true,' in response.text, (
"default configs should be preserved"
)
assert '"showExtensions": true,' in response.text, (
"default configs should be preserved"
)
assert '"showCommonExtensions": true,' in response.text, (
"default configs should be preserved"
)
def test_get_users():
response = client.get("/users/foo")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Hello foo"}
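# A minimal sketch of the kind of app these assertions exercise. The route and the
# parameter dict are assumptions for illustration; the real app lives in
# docs_src.configure_swagger_ui.tutorial001.
from fastapi import FastAPI

sketch_app = FastAPI(swagger_ui_parameters={"syntaxHighlight": False})

@sketch_app.get("/users/{username}")
async def sketch_read_user(username: str):
    return {"message": f"Hello {username}"}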
|
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial001 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert (
'"syntaxHighlight": false' in response.text
), "syntaxHighlight should be included and converted to JSON"
assert (
'"dom_id": "#swagger-ui"' in response.text
), "default configs should be preserved"
assert "presets: [" in response.text, "default configs should be preserved"
assert (
"SwaggerUIBundle.presets.apis," in response.text
), "default configs should be preserved"
assert (
"SwaggerUIBundle.SwaggerUIStandalonePreset" in response.text
), "default configs should be preserved"
assert (
'"layout": "BaseLayout",' in response.text
), "default configs should be preserved"
assert (
'"deepLinking": true,' in response.text
), "default configs should be preserved"
assert (
'"showExtensions": true,' in response.text
), "default configs should be preserved"
assert (
'"showCommonExtensions": true,' in response.text
), "default configs should be preserved"
def test_get_users():
response = client.get("/users/foo")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Hello foo"}
|
import os
import pytest
from jina import Client, Document, Executor, Flow, requests
from jina.helper import random_port
class MyExec(Executor):
def __init__(self, bar: str, bar2: int = 3, **kwargs):
super().__init__(**kwargs)
self.bar = bar
self.bar2 = bar2
@requests(on=['/foo', '/foo2'])
def foo(self, docs, **kwargs):
for d in docs:
d.text = '/foo'
@requests
def bar(self, docs, **kwargs):
for d in docs:
d.text = '/bar'
def random(self, docs, **kwargs):
for d in docs:
d.text = '/random'
y = """
jtype: MyExec
with:
bar: hello
bar2: 1
metas:
name: my-awesomeness
description: this is an awesome executor
requests:
/foo_endpoint: foo
/random_endpoint: random
"""
def test_load_save_yml(tmp_path):
m = Executor.load_config(y)
m.save_config(os.path.join(tmp_path, 'a.yml'))
assert m.bar == 'hello'
assert m.bar2 == 1
assert m.metas.name == 'my-awesomeness'
for k in ('/default', '/foo_endpoint', '/random_endpoint'):
assert k in m.requests
@pytest.mark.parametrize(
'req_endpoint, doc_text',
[
('/foo', '/foo'),
('/foo2', '/bar'),
('/foo3', '/bar'),
('/foo_endpoint', '/foo'),
('/random_endpoint', '/random'),
('/bar', '/bar'),
],
)
def test_load_yaml_route(req_endpoint, doc_text):
port = random_port()
f = Flow(port=port).add(uses=y)
c = Client(port=f.port)
with f:
results = c.post(req_endpoint, Document(), return_responses=True)
assert results[0].docs[0].text == doc_text
|
import os
import pytest
from jina import Executor, Client, requests, Flow, Document
exposed_port = 12345
class MyExec(Executor):
def __init__(self, bar: str, bar2: int = 3, **kwargs):
super().__init__(**kwargs)
self.bar = bar
self.bar2 = bar2
@requests(on=['/foo', '/foo2'])
def foo(self, docs, **kwargs):
for d in docs:
d.text = '/foo'
@requests
def bar(self, docs, **kwargs):
for d in docs:
d.text = '/bar'
def random(self, docs, **kwargs):
for d in docs:
d.text = '/random'
y = """
jtype: MyExec
with:
bar: hello
bar2: 1
metas:
name: my-awesomeness
description: this is an awesome executor
requests:
/foo_endpoint: foo
/random_endpoint: random
"""
def test_load_save_yml(tmp_path):
m = Executor.load_config(y)
m.save_config(os.path.join(tmp_path, 'a.yml'))
assert m.bar == 'hello'
assert m.bar2 == 1
assert m.metas.name == 'my-awesomeness'
for k in ('/default', '/foo_endpoint', '/random_endpoint'):
assert k in m.requests
@pytest.mark.parametrize(
'req_endpoint, doc_text',
[
('/foo', '/foo'),
('/foo2', '/bar'),
('/foo3', '/bar'),
('/foo_endpoint', '/foo'),
('/random_endpoint', '/random'),
('/bar', '/bar'),
],
)
def test_load_yaml_route(req_endpoint, doc_text):
f = Flow(port=12345).add(uses=y)
c = Client(port=exposed_port, return_responses=True)
with f:
results = c.post(req_endpoint, Document())
assert results[0].docs[0].text == doc_text
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.utils import digit_version
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
tempearture (float, optional): Temperature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self) -> None:
"""Initialize the weights."""
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedLinear`."""
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
tempearture (float, optional): Temperature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Defaults to False.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
norm_over_kernel: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedConv2d`."""
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
if digit_version(torch.__version__) >= digit_version('1.8'):
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
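# Usage sketch (illustration only, not part of the upstream module): NormedLinear
# L2-normalizes both the weight rows and the input features before the projection,
# so each output is roughly a cosine similarity scaled by `tempearture`.
if __name__ == '__main__':
    _layer = NormedLinear(8, 4, tempearture=20)
    _out = _layer(torch.randn(2, 8))
    print(_out.shape)  # torch.Size([2, 4])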
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
tempearture (float, optional): Temperature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self) -> None:
"""Initialize the weights."""
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedLinear`."""
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
tempearture (float, optional): Temperature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Defaults to False.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
norm_over_kernel: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedConv2d`."""
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
if torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
|
"""
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. As a result, Theil-Sen is applicable to larger
problems, with the drawback of losing some of its mathematical properties, since
it then works on a random subset.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor, TheilSenRegressor
estimators = [
("OLS", LinearRegression()),
("Theil-Sen", TheilSenRegressor(random_state=42)),
("RANSAC", RANSACRegressor(random_state=42)),
]
colors = {"OLS": "turquoise", "Theil-Sen": "gold", "RANSAC": "lightgreen"}
lw = 2
# %%
# Outliers only in the y direction
# --------------------------------
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.0
c = 2.0
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.scatter(x, y, color="indigo", marker="x", s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(
line_x,
y_pred,
color=colors[name],
linewidth=lw,
label="%s (fit time: %.2fs)" % (name, elapsed_time),
)
plt.axis("tight")
plt.legend(loc="upper right")
_ = plt.title("Corrupt y")
# %%
# Outliers in the X direction
# ---------------------------
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color="indigo", marker="x", s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(
line_x,
y_pred,
color=colors[name],
linewidth=lw,
label="%s (fit time: %.2fs)" % (name, elapsed_time),
)
plt.axis("tight")
plt.legend(loc="upper left")
plt.title("Corrupt x")
plt.show()
|
"""
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. As a result, Theil-Sen is applicable to larger
problems, with the drawback of losing some of its mathematical properties, since
it then works on a random subset.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor, TheilSenRegressor
estimators = [
("OLS", LinearRegression()),
("Theil-Sen", TheilSenRegressor(random_state=42)),
("RANSAC", RANSACRegressor(random_state=42)),
]
colors = {"OLS": "turquoise", "Theil-Sen": "gold", "RANSAC": "lightgreen"}
lw = 2
# %%
# Outliers only in the y direction
# --------------------------------
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.0
c = 2.0
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.scatter(x, y, color="indigo", marker="x", s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(
line_x,
y_pred,
color=colors[name],
linewidth=lw,
label="%s (fit time: %.2fs)" % (name, elapsed_time),
)
plt.axis("tight")
plt.legend(loc="upper left")
_ = plt.title("Corrupt y")
# %%
# Outliers in the X direction
# ---------------------------
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color="indigo", marker="x", s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(
line_x,
y_pred,
color=colors[name],
linewidth=lw,
label="%s (fit time: %.2fs)" % (name, elapsed_time),
)
plt.axis("tight")
plt.legend(loc="upper left")
plt.title("Corrupt x")
plt.show()
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from abc import ABCMeta, abstractmethod
from mmengine.logging import print_log
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends.
All backends need to implement two apis: :meth:`get()` and
:meth:`get_text()`.
- :meth:`get()` reads the file as a byte stream.
- :meth:`get_text()` reads the file as texts.
"""
# a flag to indicate whether the backend can create a symlink for a file
# This attribute will be deprecated in future.
_allow_symlink = False
@property
def allow_symlink(self):
print_log(
'allow_symlink will be deprecated in future',
logger='current',
level=logging.WARNING)
return self._allow_symlink
@property
def name(self):
return self.__class__.__name__
@abstractmethod
def get(self, filepath):
pass
@abstractmethod
def get_text(self, filepath):
pass
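# A minimal concrete backend, shown purely as a sketch of the two required APIs
# (this class is an illustration and is not part of the upstream package).
class LocalBackendSketch(BaseStorageBackend):

    def get(self, filepath):
        """Read the file as a byte stream."""
        with open(filepath, 'rb') as f:
            return f.read()

    def get_text(self, filepath):
        """Read the file as text."""
        with open(filepath, encoding='utf-8') as f:
            return f.read()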
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends.
All backends need to implement two apis: :meth:`get()` and
:meth:`get_text()`.
- :meth:`get()` reads the file as a byte stream.
- :meth:`get_text()` reads the file as texts.
"""
# a flag to indicate whether the backend can create a symlink for a file
# This attribute will be deprecated in future.
_allow_symlink = False
@property
def allow_symlink(self):
warnings.warn('allow_symlink will be deprecated in future',
DeprecationWarning)
return self._allow_symlink
@property
def name(self):
return self.__class__.__name__
@abstractmethod
def get(self, filepath):
pass
@abstractmethod
def get_text(self, filepath):
pass
|
import re
from langchain_core.output_parsers import BaseOutputParser
class BooleanOutputParser(BaseOutputParser[bool]):
"""Parse the output of an LLM call to a boolean."""
true_val: str = "YES"
"""The string value that should be parsed as True."""
false_val: str = "NO"
"""The string value that should be parsed as False."""
def parse(self, text: str) -> bool:
"""Parse the output of an LLM call to a boolean.
Args:
text: output of a language model
Returns:
boolean
"""
regexp = rf"\b({self.true_val}|{self.false_val})\b"
truthy = {
val.upper()
for val in re.findall(regexp, text, flags=re.IGNORECASE | re.MULTILINE)
}
if self.true_val.upper() in truthy:
if self.false_val.upper() in truthy:
msg = (
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
raise ValueError(msg)
return True
elif self.false_val.upper() in truthy:
if self.true_val.upper() in truthy:
msg = (
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
raise ValueError(msg)
return False
msg = (
f"BooleanOutputParser expected output value to include either "
f"{self.true_val} or {self.false_val}. Received {text}."
)
raise ValueError(msg)
@property
def _type(self) -> str:
"""Snake-case string identifier for an output parser type."""
return "boolean_output_parser"
|
import re
from langchain_core.output_parsers import BaseOutputParser
class BooleanOutputParser(BaseOutputParser[bool]):
"""Parse the output of an LLM call to a boolean."""
true_val: str = "YES"
"""The string value that should be parsed as True."""
false_val: str = "NO"
"""The string value that should be parsed as False."""
def parse(self, text: str) -> bool:
"""Parse the output of an LLM call to a boolean.
Args:
text: output of a language model
Returns:
boolean
"""
regexp = rf"\b({self.true_val}|{self.false_val})\b"
truthy = {
val.upper()
for val in re.findall(regexp, text, flags=re.IGNORECASE | re.MULTILINE)
}
if self.true_val.upper() in truthy:
if self.false_val.upper() in truthy:
raise ValueError(
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
return True
elif self.false_val.upper() in truthy:
if self.true_val.upper() in truthy:
raise ValueError(
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
return False
raise ValueError(
f"BooleanOutputParser expected output value to include either "
f"{self.true_val} or {self.false_val}. Received {text}."
)
@property
def _type(self) -> str:
"""Snake-case string identifier for an output parser type."""
return "boolean_output_parser"
|
"""Init file."""
from llama_index.readers.remote.base import (
RemoteReader,
)
__all__ = ["RemoteReader"]
|
"""Init file."""
from llama_index.readers.remote.base import (
RemoteReader,
)
__all__ = ["RemoteReader"]
|
from langchain.output_parsers.regex import RegexParser
def load_output_parser(config: dict) -> dict:
"""Load an output parser.
Args:
config: config dict
Returns:
config dict with output parser loaded
"""
if "output_parsers" in config:
if config["output_parsers"] is not None:
_config = config["output_parsers"]
output_parser_type = _config["_type"]
if output_parser_type == "regex_parser":
output_parser = RegexParser(**_config)
else:
msg = f"Unsupported output parser {output_parser_type}"
raise ValueError(msg)
config["output_parsers"] = output_parser
return config
|
from langchain.output_parsers.regex import RegexParser
def load_output_parser(config: dict) -> dict:
"""Load an output parser.
Args:
config: config dict
Returns:
config dict with output parser loaded
"""
if "output_parsers" in config:
if config["output_parsers"] is not None:
_config = config["output_parsers"]
output_parser_type = _config["_type"]
if output_parser_type == "regex_parser":
output_parser = RegexParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
config["output_parsers"] = output_parser
return config
|
from __future__ import annotations
import copy
from typing import TYPE_CHECKING, List
import pytest
from langchain_core.documents import Document
from pytest_mock import MockerFixture
from langchain_community.retrievers import ZepRetriever
if TYPE_CHECKING:
from zep_python import MemorySearchResult, ZepClient
@pytest.fixture
def search_results() -> List[MemorySearchResult]:
from zep_python import MemorySearchResult, Message
search_result = [
{
"message": {
"uuid": "66830914-19f5-490b-8677-1ba06bcd556b",
"created_at": "2023-05-18T20:40:42.743773Z",
"role": "user",
"content": "I'm looking to plan a trip to Iceland. Can you help me?",
"token_count": 17,
},
"summary": None,
"dist": 0.8734284910450115,
},
{
"message": {
"uuid": "015e618c-ba9d-45b6-95c3-77a8e611570b",
"created_at": "2023-05-18T20:40:42.743773Z",
"role": "user",
"content": "How much does a trip to Iceland typically cost?",
"token_count": 12,
},
"summary": None,
"dist": 0.8554048017463456,
},
]
return [
MemorySearchResult(
message=Message.parse_obj(result["message"]),
summary=result["summary"],
dist=result["dist"],
)
for result in search_result
]
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_retriever(
mocker: MockerFixture, search_results: List[MemorySearchResult]
) -> ZepRetriever:
mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
mock_zep_client.memory = mocker.patch(
"zep_python.memory.client.MemoryClient", autospec=True
)
mock_zep_client.memory.search_memory.return_value = copy.deepcopy(search_results)
mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy(search_results)
zep = ZepRetriever(session_id="123", url="http://localhost:8000") # type: ignore[call-arg]
zep.zep_client = mock_zep_client
return zep
@pytest.mark.requires("zep_python")
def test_zep_retriever_invoke(
zep_retriever: ZepRetriever, search_results: List[MemorySearchResult]
) -> None:
documents: List[Document] = zep_retriever.invoke("My trip to Iceland")
_test_documents(documents, search_results)
@pytest.mark.requires("zep_python")
async def test_zep_retriever_ainvoke(
zep_retriever: ZepRetriever, search_results: List[MemorySearchResult]
) -> None:
documents: List[Document] = await zep_retriever.ainvoke("My trip to Iceland")
_test_documents(documents, search_results)
def _test_documents(
documents: List[Document], search_results: List[MemorySearchResult]
) -> None:
assert len(documents) == 2
for i, document in enumerate(documents):
assert document.page_content == search_results[i].message.get("content")
assert document.metadata.get("uuid") == search_results[i].message.get("uuid")
assert document.metadata.get("role") == search_results[i].message.get("role")
assert document.metadata.get("score") == search_results[i].dist
|
from __future__ import annotations
import copy
from typing import TYPE_CHECKING, List
import pytest
from langchain_core.documents import Document
from pytest_mock import MockerFixture
from langchain_community.retrievers import ZepRetriever
if TYPE_CHECKING:
from zep_python import MemorySearchResult, ZepClient
@pytest.fixture
def search_results() -> List[MemorySearchResult]:
from zep_python import MemorySearchResult, Message
search_result = [
{
"message": {
"uuid": "66830914-19f5-490b-8677-1ba06bcd556b",
"created_at": "2023-05-18T20:40:42.743773Z",
"role": "user",
"content": "I'm looking to plan a trip to Iceland. Can you help me?",
"token_count": 17,
},
"summary": None,
"dist": 0.8734284910450115,
},
{
"message": {
"uuid": "015e618c-ba9d-45b6-95c3-77a8e611570b",
"created_at": "2023-05-18T20:40:42.743773Z",
"role": "user",
"content": "How much does a trip to Iceland typically cost?",
"token_count": 12,
},
"summary": None,
"dist": 0.8554048017463456,
},
]
return [
MemorySearchResult(
message=Message.parse_obj(result["message"]),
summary=result["summary"],
dist=result["dist"],
)
for result in search_result
]
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_retriever(
mocker: MockerFixture, search_results: List[MemorySearchResult]
) -> ZepRetriever:
mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
mock_zep_client.memory = mocker.patch(
"zep_python.memory.client.MemoryClient", autospec=True
)
mock_zep_client.memory.search_memory.return_value = copy.deepcopy( # type: ignore
search_results
)
mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy( # type: ignore
search_results
)
zep = ZepRetriever(session_id="123", url="http://localhost:8000") # type: ignore[call-arg]
zep.zep_client = mock_zep_client
return zep
@pytest.mark.requires("zep_python")
def test_zep_retriever_invoke(
zep_retriever: ZepRetriever, search_results: List[MemorySearchResult]
) -> None:
documents: List[Document] = zep_retriever.invoke("My trip to Iceland")
_test_documents(documents, search_results)
@pytest.mark.requires("zep_python")
async def test_zep_retriever_ainvoke(
zep_retriever: ZepRetriever, search_results: List[MemorySearchResult]
) -> None:
documents: List[Document] = await zep_retriever.ainvoke("My trip to Iceland")
_test_documents(documents, search_results)
def _test_documents(
documents: List[Document], search_results: List[MemorySearchResult]
) -> None:
assert len(documents) == 2
for i, document in enumerate(documents):
assert document.page_content == search_results[i].message.get( # type: ignore
"content"
)
assert document.metadata.get("uuid") == search_results[i].message.get( # type: ignore
"uuid"
)
assert document.metadata.get("role") == search_results[i].message.get( # type: ignore
"role"
)
assert document.metadata.get("score") == search_results[i].dist
|
import sys
from jina.parsers import set_gateway_parser
from jina.parsers.helper import _set_gateway_uses
from jina.serve.runtimes.gateway import GatewayRuntime
def run(*args, **kwargs):
runtime_cls = GatewayRuntime
print(f' args {args}')
runtime_args = set_gateway_parser().parse_args(args)
print(f' protocol {runtime_args.protocol}')
_set_gateway_uses(runtime_args)
print(f' runtime_cls {runtime_cls}')
with runtime_cls(runtime_args) as runtime:
print(" Let's run forever")
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
import sys
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.enums import GatewayProtocolType
from jina.parsers import set_gateway_parser
def run(*args, **kwargs):
runtime_cls = None
print(f' args {args}')
runtime_args = set_gateway_parser().parse_args(args)
print(f' protocol {runtime_args.protocol}')
if runtime_args.protocol == GatewayProtocolType.GRPC:
runtime_cls = GRPCGatewayRuntime
elif runtime_args.protocol == GatewayProtocolType.HTTP:
runtime_cls = HTTPGatewayRuntime
elif runtime_args.protocol == GatewayProtocolType.WEBSOCKET:
runtime_cls = WebSocketGatewayRuntime
print(f' runtime_cls {runtime_cls}')
with runtime_cls(runtime_args) as runtime:
print(" Let's run forever")
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import BatchSyncRandomResize, DetDataPreprocessor
__all__ = ['DetDataPreprocessor', 'BatchSyncRandomResize']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import DetDataPreprocessor
__all__ = ['DetDataPreprocessor']
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from dataclasses import dataclass, field
from typing import Union
from transformers import TrainingArguments as TransformersTrainingArguments
from transformers.utils import ExplicitEnum
class BatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for batch samplers.
The batch sampler is responsible for determining how samples are grouped into batches during training.
Valid options are:
- ``BatchSamplers.BATCH_SAMPLER``: The default PyTorch batch sampler.
- ``BatchSamplers.NO_DUPLICATES``: Ensures no duplicate samples in a batch.
- ``BatchSamplers.GROUP_BY_LABEL``: Ensures each batch has 2+ samples from the same label.
"""
BATCH_SAMPLER = "batch_sampler"
NO_DUPLICATES = "no_duplicates"
GROUP_BY_LABEL = "group_by_label"
class MultiDatasetBatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for multi-dataset batch samplers.
The multi-dataset batch sampler is responsible for determining in what order batches are sampled from multiple
datasets during training. Valid options are:
- ``MultiDatasetBatchSamplers.ROUND_ROBIN``: Round-robin sampling from each dataset until one is exhausted.
With this strategy, it's likely that not all samples from each dataset are used, but each dataset is sampled
from equally.
- ``MultiDatasetBatchSamplers.PROPORTIONAL``: Sample from each dataset in proportion to its size [default].
With this strategy, all samples from each dataset are used and larger datasets are sampled from more frequently.
"""
ROUND_ROBIN = "round_robin" # Round-robin sampling from each dataset
PROPORTIONAL = "proportional" # Sample from each dataset in proportion to its size [default]
@dataclass
class SentenceTransformerTrainingArguments(TransformersTrainingArguments):
"""
SentenceTransformerTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
batch_sampler: Union[BatchSamplers, str] = field(
default=BatchSamplers.BATCH_SAMPLER, metadata={"help": "The batch sampler to use."}
)
multi_dataset_batch_sampler: Union[MultiDatasetBatchSamplers, str] = field(
default=MultiDatasetBatchSamplers.PROPORTIONAL, metadata={"help": "The multi-dataset batch sampler to use."}
)
def __post_init__(self):
super().__post_init__()
self.batch_sampler = BatchSamplers(self.batch_sampler)
self.multi_dataset_batch_sampler = MultiDatasetBatchSamplers(self.multi_dataset_batch_sampler)
# The `compute_loss` method in `SentenceTransformerTrainer` is overridden to only compute the prediction loss,
# so we set `prediction_loss_only` to `True` here to avoid computing and storing predictions that are never used
self.prediction_loss_only = True
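# Usage sketch (illustration only; the output directory is an arbitrary placeholder):
# string aliases and enum members are interchangeable thanks to `__post_init__`.
if __name__ == "__main__":
    args = SentenceTransformerTrainingArguments(
        output_dir="output/example-run",
        batch_sampler=BatchSamplers.NO_DUPLICATES,
        multi_dataset_batch_sampler="round_robin",
    )
    print(args.batch_sampler, args.multi_dataset_batch_sampler)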
|
from dataclasses import dataclass, field
from typing import Union
from transformers import TrainingArguments as TransformersTrainingArguments
from transformers.utils import ExplicitEnum
class BatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for batch samplers.
The batch sampler is responsible for determining how samples are grouped into batches during training.
Valid options are:
- ``BatchSamplers.BATCH_SAMPLER``: The default PyTorch batch sampler.
- ``BatchSamplers.NO_DUPLICATES``: Ensures no duplicate samples in a batch.
- ``BatchSamplers.GROUP_BY_LABEL``: Ensures each batch has 2+ samples from the same label.
"""
BATCH_SAMPLER = "batch_sampler"
NO_DUPLICATES = "no_duplicates"
GROUP_BY_LABEL = "group_by_label"
class MultiDatasetBatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for multi-dataset batch samplers.
The multi-dataset batch sampler is responsible for determining in what order batches are sampled from multiple
datasets during training. Valid options are:
- ``MultiDatasetBatchSamplers.ROUND_ROBIN``: Round-robin sampling from each dataset until one is exhausted.
With this strategy, it's likely that not all samples from each dataset are used, but each dataset is sampled
from equally.
- ``MultiDatasetBatchSamplers.PROPORTIONAL``: Sample from each dataset in proportion to its size [default].
With this strategy, all samples from each dataset are used and larger datasets are sampled from more frequently.
"""
ROUND_ROBIN = "round_robin" # Round-robin sampling from each dataset
PROPORTIONAL = "proportional" # Sample from each dataset in proportion to its size [default]
@dataclass
class SentenceTransformerTrainingArguments(TransformersTrainingArguments):
"""
SentenceTransformerTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
batch_sampler: Union[BatchSamplers, str] = field(
default=BatchSamplers.BATCH_SAMPLER, metadata={"help": "The batch sampler to use."}
)
multi_dataset_batch_sampler: Union[MultiDatasetBatchSamplers, str] = field(
default=MultiDatasetBatchSamplers.PROPORTIONAL, metadata={"help": "The multi-dataset batch sampler to use."}
)
def __post_init__(self):
super().__post_init__()
self.batch_sampler = BatchSamplers(self.batch_sampler)
self.multi_dataset_batch_sampler = MultiDatasetBatchSamplers(self.multi_dataset_batch_sampler)
# The `compute_loss` method in `SentenceTransformerTrainer` is overridden to only compute the prediction loss,
# so we set `prediction_loss_only` to `True` here to avoid computing and storing predictions that are never used
self.prediction_loss_only = True
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.23.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
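# Worked example (sketch): plain releases map to integer tuples and release
# candidates keep their 'rc' suffix as a trailing string element.
if __name__ == '__main__':
    assert parse_version_info('2.23.0') == (2, 23, 0)
    assert parse_version_info('2.23.0rc1') == (2, 23, 0, 'rc1')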
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.22.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_panoptic_fusion_head import \
BasePanopticFusionHead # noqa: F401,F403
from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403
from .maskformer_fusion_head import MaskFormerFusionHead # noqa: F401,F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_panoptic_fusion_head import \
BasePanopticFusionHead # noqa: F401,F403
from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403
|
import json
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
from jina.serve.streamer import GatewayStreamer
JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layout/colored/Product%20logo_Core_vertical_colorful%402x-margin.png'
GATEWAY_SCHEMA_FILENAME = 'gateway.json'
args = set_gateway_parser().parse_args([])
logger = JinaLogger('')
graph_description = json.loads(args.graph_description)
graph_conditions = json.loads(args.graph_conditions)
deployments_addresses = json.loads(args.deployments_addresses)
deployments_disable_reduce = json.loads(args.deployments_disable_reduce)
streamer = GatewayStreamer(
graph_representation=graph_description,
executor_addresses=deployments_addresses,
graph_conditions=graph_conditions,
deployments_disable_reduce=deployments_disable_reduce,
timeout_send=args.timeout_send,
retries=args.retries,
compression=args.compression,
runtime_name=args.name,
prefetch=args.prefetch,
logger=logger,
)
gateway_app = get_fastapi_app(
streamer=streamer,
title=args.title,
description=args.description,
no_debug_endpoints=args.no_debug_endpoints,
no_crud_endpoints=args.no_crud_endpoints,
expose_endpoints=args.expose_endpoints,
expose_graphql_endpoint=args.expose_graphql_endpoint,
cors=args.cors,
logger=logger,
)
gateway_schema = gateway_app.openapi()
gateway_schema['info']['x-logo'] = {'url': JINA_LOGO_URL}
gateway_schema['servers'] = []
gateway_schema['servers'].append(
{'url': f'http://localhost:{args.port}', 'description': 'Local Jina gateway'}
)
with open(GATEWAY_SCHEMA_FILENAME, 'w') as f:
json.dump(gateway_schema, f)
|
import json
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layout/colored/Product%20logo_Core_vertical_colorful%402x-margin.png'
GATEWAY_SCHEMA_FILENAME = 'gateway.json'
args = set_gateway_parser().parse_args([])
logger = JinaLogger('')
gateway_app = get_fastapi_app(
title=args.title,
description=args.description,
no_debug_endpoints=args.no_debug_endpoints,
no_crud_endpoints=args.no_crud_endpoints,
expose_endpoints=args.expose_endpoints,
expose_graphql_endpoint=args.expose_graphql_endpoint,
cors=args.cors,
logger=args.logger,
)
gateway_schema = gateway_app.openapi()
gateway_schema['info']['x-logo'] = {'url': JINA_LOGO_URL}
gateway_schema['servers'] = []
gateway_schema['servers'].append(
{'url': f'http://localhost:{args.port}', 'description': 'Local Jina gateway'}
)
with open(GATEWAY_SCHEMA_FILENAME, 'w') as f:
json.dump(gateway_schema, f)
|
_base_ = './yolof_r50-c5_8xb8-1x_coco.py'
# We implemented the iter-based config according to the source code.
# The COCO dataset has 117266 images after filtering. We train with 8 GPUs and
# a batch size of 8, so 22500 iterations are equivalent to
# 22500/(117266/(8x8))=12.3 epochs, 15000 iterations to 8.2 epochs,
# and 20000 iterations to 10.9 epochs. Because the lr (0.12) is large,
# the iter-based and epoch-based settings differ by about 0.2 in
# the mAP evaluation value.
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=22500,
val_interval=4500)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=22500,
by_epoch=False,
milestones=[15000, 20000],
gamma=0.1)
]
train_dataloader = dict(sampler=dict(type='InfiniteSampler'))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=2500))
log_processor = dict(by_epoch=False)
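# Sanity check of the conversion above (sketch): 8 GPUs x batch size 8 = 64 images
# per iteration, so one epoch is roughly 117266 / 64 ~= 1832 iterations, and
# 22500 iters ~= 12.3 epochs, 15000 ~= 8.2 epochs, 20000 ~= 10.9 epochs.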
|
_base_ = './yolof_r50_c5_8x8_1x_coco.py'
# We implemented the iter-based config according to the source code.
# The COCO dataset has 117266 images after filtering. We train with 8 GPUs and
# a batch size of 8, so 22500 iterations are equivalent to
# 22500/(117266/(8x8))=12.3 epochs, 15000 iterations to 8.2 epochs,
# and 20000 iterations to 10.9 epochs. Because the lr (0.12) is large,
# the iter-based and epoch-based settings differ by about 0.2 in
# the mAP evaluation value.
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=22500,
val_interval=4500)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=22500,
by_epoch=False,
milestones=[15000, 20000],
gamma=0.1)
]
train_dataloader = dict(sampler=dict(type='InfiniteSampler'))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=2500))
log_processor = dict(by_epoch=False)
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
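# Usage sketch (illustration only; the file path is a placeholder): build a
# map-style dataset from a local plain-text file, one example per line.
if __name__ == "__main__":
    reader = TextDatasetReader("my_corpus.txt")
    dataset = reader.read()
    print(dataset)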
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
|
from unittest.mock import patch
import pytest
from llama_index.core.readers.base import BaseReader
from llama_index.readers.microsoft_outlook_emails import OutlookEmailReader
def test_class():
names_of_base_classes = [b.__name__ for b in OutlookEmailReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
def test_serialize():
reader = OutlookEmailReader(
client_id="test_client_id",
client_secret="test_client_secret",
tenant_id="test_tenant_id",
user_email="[email protected]",
num_mails=5,
)
# Get the JSON schema
schema = reader.model_json_schema()
assert schema is not None
assert "properties" in schema
assert "client_id" in schema["properties"]
assert "client_secret" in schema["properties"]
assert "tenant_id" in schema["properties"]
assert "user_email" in schema["properties"]
# Test serialization/deserialization
data = reader.model_dump()
data.pop("is_remote")
data.pop("class_name")
new_reader = OutlookEmailReader.model_validate(data)
assert new_reader.client_id == reader.client_id
assert new_reader.client_secret == reader.client_secret
assert new_reader.tenant_id == reader.tenant_id
assert new_reader.user_email == reader.user_email
@pytest.fixture()
def outlook_reader():
return OutlookEmailReader(
client_id="dummy_client_id",
client_secret="dummy_client_secret",
tenant_id="dummy_tenant_id",
user_email="[email protected]",
num_mails=2,
)
def mock_fetch_emails(*args, **kwargs):
return [
{
"subject": "Test Email 1",
"body": {"content": "This is the body of email 1."},
},
{
"subject": "Test Email 2",
"body": {"content": "This is the body of email 2."},
},
]
def test_load_data(outlook_reader):
# Only mock the response from _fetch_emails, not the entire method
with patch.object(
OutlookEmailReader, "_fetch_emails", return_value=mock_fetch_emails()
):
email_texts = outlook_reader.load_data()
# Verify the results
assert len(email_texts) == 2
assert email_texts[0] == "Subject: Test Email 1\n\nThis is the body of email 1."
assert email_texts[1] == "Subject: Test Email 2\n\nThis is the body of email 2."
|
import pytest
from unittest.mock import patch
from llama_index.core.readers.base import BaseReader
from llama_index.readers.outlook_emails import OutlookEmailReader
def test_class():
names_of_base_classes = [b.__name__ for b in OutlookEmailReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
def test_serialize():
reader = OutlookEmailReader(
client_id="test_client_id",
client_secret="test_client_secret",
tenant_id="test_tenant_id",
user_email="[email protected]",
num_mails=5,
)
# Get the JSON schema
schema = reader.model_json_schema()
assert schema is not None
assert "properties" in schema
assert "client_id" in schema["properties"]
assert "client_secret" in schema["properties"]
assert "tenant_id" in schema["properties"]
assert "user_email" in schema["properties"]
# Test serialization/deserialization
data = reader.model_dump()
data.pop("is_remote")
data.pop("class_name")
new_reader = OutlookEmailReader.model_validate(data)
assert new_reader.client_id == reader.client_id
assert new_reader.client_secret == reader.client_secret
assert new_reader.tenant_id == reader.tenant_id
assert new_reader.user_email == reader.user_email
@pytest.fixture()
def outlook_reader():
return OutlookEmailReader(
client_id="dummy_client_id",
client_secret="dummy_client_secret",
tenant_id="dummy_tenant_id",
user_email="[email protected]",
num_mails=2,
)
def mock_fetch_emails(*args, **kwargs):
return [
{
"subject": "Test Email 1",
"body": {"content": "This is the body of email 1."},
},
{
"subject": "Test Email 2",
"body": {"content": "This is the body of email 2."},
},
]
def test_load_data(outlook_reader):
# Only mock the response from _fetch_emails, not the entire method
with patch.object(
OutlookEmailReader, "_fetch_emails", return_value=mock_fetch_emails()
):
email_texts = outlook_reader.load_data()
# Verify the results
assert len(email_texts) == 2
assert email_texts[0] == "Subject: Test Email 1\n\nThis is the body of email 1."
assert email_texts[1] == "Subject: Test Email 2\n\nThis is the body of email 2."
|
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from langchain.chains.base import Chain
class Route(NamedTuple):
destination: Optional[str]
next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self, inputs: dict[str, Any], callbacks: Callbacks = None
) -> Route:
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects.
:meta private:
"""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key.
:meta private:
"""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
elif route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
else:
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
elif route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
else:
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
|
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from langchain.chains.base import Chain
class Route(NamedTuple):
destination: Optional[str]
next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self, inputs: dict[str, Any], callbacks: Callbacks = None
) -> Route:
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects.
:meta private:
"""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key.
:meta private:
"""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
elif route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
else:
raise ValueError(
f"Received invalid destination chain name '{route.destination}'"
)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
elif route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
else:
raise ValueError(
f"Received invalid destination chain name '{route.destination}'"
)
|
import unittest
import pytest
import torch
from torchvision.models.maxvit import SwapAxes, WindowDepartition, WindowPartition
class MaxvitTester(unittest.TestCase):
def test_maxvit_window_partition(self):
input_shape = (1, 3, 224, 224)
partition_size = 7
n_partitions = input_shape[3] // partition_size
x = torch.randn(input_shape)
partition = WindowPartition()
departition = WindowDepartition()
x_hat = partition(x, partition_size)
x_hat = departition(x_hat, partition_size, n_partitions, n_partitions)
torch.testing.assert_close(x, x_hat)
def test_maxvit_grid_partition(self):
input_shape = (1, 3, 224, 224)
partition_size = 7
n_partitions = input_shape[3] // partition_size
x = torch.randn(input_shape)
pre_swap = SwapAxes(-2, -3)
post_swap = SwapAxes(-2, -3)
partition = WindowPartition()
departition = WindowDepartition()
x_hat = partition(x, n_partitions)
x_hat = pre_swap(x_hat)
x_hat = post_swap(x_hat)
x_hat = departition(x_hat, n_partitions, partition_size, partition_size)
torch.testing.assert_close(x, x_hat)
if __name__ == "__main__":
pytest.main([__file__])
|
import unittest
import pytest
import torch
from torchvision.models.maxvit import SwapAxes, WindowDepartition, WindowPartition
class MaxvitTester(unittest.TestCase):
def test_maxvit_window_partition(self):
input_shape = (1, 3, 224, 224)
partition_size = 7
n_partitions = input_shape[3] // partition_size
x = torch.randn(input_shape)
partition = WindowPartition()
departition = WindowDepartition()
x_hat = partition(x, partition_size)
x_hat = departition(x_hat, partition_size, n_partitions, n_partitions)
assert torch.allclose(x, x_hat)
def test_maxvit_grid_partition(self):
input_shape = (1, 3, 224, 224)
partition_size = 7
n_partitions = input_shape[3] // partition_size
x = torch.randn(input_shape)
pre_swap = SwapAxes(-2, -3)
post_swap = SwapAxes(-2, -3)
partition = WindowPartition()
departition = WindowDepartition()
x_hat = partition(x, n_partitions)
x_hat = pre_swap(x_hat)
x_hat = post_swap(x_hat)
x_hat = departition(x_hat, n_partitions, partition_size, partition_size)
assert torch.allclose(x, x_hat)
if __name__ == "__main__":
pytest.main([__file__])
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker, is_qdm: bool
) -> None:
workers = tm.get_client_workers(client)
args = await client.sync(
xgb.dask._get_rabit_args,
len(workers),
None,
client,
)
n_workers = len(workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cpu",
comm_args=args,
is_qdm=is_qdm,
)
await client.gather(futs)
|
from typing import List, cast
import numpy as np
from distributed import Client, Scheduler, Worker, get_worker
from distributed.utils_test import gen_cluster
import xgboost as xgb
from xgboost import testing as tm
from xgboost.compat import concat
def run_external_memory(worker_id: int, n_workers: int, comm_args: dict) -> None:
n_samples_per_batch = 32
n_features = 4
n_batches = 16
use_cupy = False
n_threads = get_worker().state.nthreads
with xgb.collective.CommunicatorContext(dmlc_communicator="rabit", **comm_args):
it = tm.IteratorForTest(
*tm.make_batches(
n_samples_per_batch,
n_features,
n_batches,
use_cupy,
random_state=worker_id,
),
cache="cache",
)
Xy = xgb.DMatrix(it, nthread=n_threads)
results: xgb.callback.TrainingCallback.EvalsLog = {}
booster = xgb.train(
{"tree_method": "hist", "nthread": n_threads},
Xy,
evals=[(Xy, "Train")],
num_boost_round=32,
evals_result=results,
)
assert tm.non_increasing(cast(List[float], results["Train"]["rmse"]))
lx, ly, lw = [], [], []
for i in range(n_workers):
x, y, w = tm.make_batches(
n_samples_per_batch,
n_features,
n_batches,
use_cupy,
random_state=i,
)
lx.extend(x)
ly.extend(y)
lw.extend(w)
X = concat(lx)
yconcat = concat(ly)
wconcat = concat(lw)
Xy = xgb.DMatrix(X, yconcat, weight=wconcat, nthread=n_threads)
results_local: xgb.callback.TrainingCallback.EvalsLog = {}
booster = xgb.train(
{"tree_method": "hist", "nthread": n_threads},
Xy,
evals=[(Xy, "Train")],
num_boost_round=32,
evals_result=results_local,
)
np.testing.assert_allclose(
results["Train"]["rmse"], results_local["Train"]["rmse"], rtol=1e-4
)
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker
) -> None:
workers = tm.get_client_workers(client)
args = await client.sync(
xgb.dask._get_rabit_args,
len(workers),
None,
client,
)
n_workers = len(workers)
futs = client.map(
run_external_memory, range(n_workers), n_workers=n_workers, comm_args=args
)
await client.gather(futs)
|
import torch
from parameterized import parameterized
from torchaudio.prototype.models import (
conformer_wav2vec2_base,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
)
from torchaudio_unittest.common_utils import disabledInCI, nested_params, skipIfNoCuda, torch_script, TorchaudioTestCase
class TestConformerWav2Vec2(TorchaudioTestCase):
def _smoke_test(self, model, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
batch_size, num_frames, in_features = 3, 1024, 64
features = torch.randn(batch_size, num_frames, in_features, device=device, dtype=dtype)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
device=device,
)
model(features, lengths)
@parameterized.expand([(torch.float32,), (torch.float64,)])
def test_cpu_smoke_test(self, dtype):
model = conformer_wav2vec2_base()
self._smoke_test(model, torch.device("cpu"), dtype)
@parameterized.expand([(torch.float32,), (torch.float64,)])
@skipIfNoCuda
# Disabled in CI: https://github.com/pytorch/audio/issues/3376
@disabledInCI
def test_cuda_smoke_test(self, dtype):
model = conformer_wav2vec2_base()
self._smoke_test(model, torch.device("cuda"), dtype)
@nested_params(
[conformer_wav2vec2_pretrain_base, conformer_wav2vec2_pretrain_large],
[torch.float32, torch.float64],
)
def test_pretrain_cpu_smoke_test(self, model, dtype):
model = model()
self._smoke_test(model, torch.device("cpu"), dtype)
@nested_params(
[conformer_wav2vec2_pretrain_base, conformer_wav2vec2_pretrain_large],
[torch.float32, torch.float64],
)
@skipIfNoCuda
# Disabled in CI: https://github.com/pytorch/audio/issues/3376
@disabledInCI
def test_pretrain_cuda_smoke_test(self, model, dtype):
model = model()
self._smoke_test(model, torch.device("cuda"), dtype)
def test_extract_feature(self):
model = conformer_wav2vec2_base()
model.eval()
batch_size, num_frames, in_features = 3, 1024, 64
num_layers = len(model.encoder.conformer)
features = torch.randn(batch_size, num_frames, in_features)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
all_features, lengths_ = model.extract_features(features, lengths, num_layers=None)
assert len(all_features) == num_layers
for feats in all_features:
assert feats.ndim == 3
assert feats.shape[0] == batch_size
assert lengths_.shape == torch.Size([batch_size])
for l in range(1, num_layers + 1):
feats, lengths_ = model.extract_features(features, lengths, num_layers=l)
assert len(feats) == l
for i in range(l):
self.assertEqual(all_features[i], feats[i])
assert lengths_.shape == torch.Size([batch_size])
def test_zero_length(self):
model = conformer_wav2vec2_base()
model.eval()
batch_size, num_frames, in_features = 3, 1024, 64
features = torch.randn(batch_size, num_frames, in_features)
input_lengths = torch.zeros(batch_size)
_, output_lengths = model(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
_, output_lengths = model.extract_features(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
def test_torchscript_consistency(self):
model = conformer_wav2vec2_base()
model.eval()
batch_size, num_frames, in_features = 3, 1024, 64
features = torch.randn(batch_size, num_frames, in_features)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
ref_out, ref_len = model(features, lengths)
scripted = torch_script(model)
hyp_out, hyp_len = scripted(features, lengths)
self.assertEqual(hyp_out, ref_out)
self.assertEqual(hyp_len, ref_len)
|
import torch
from parameterized import parameterized
from torchaudio.prototype.models import (
conformer_wav2vec2_base,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
)
from torchaudio_unittest.common_utils import nested_params, skipIfNoCuda, torch_script, TorchaudioTestCase
class TestConformerWav2Vec2(TorchaudioTestCase):
def _smoke_test(self, model, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
batch_size, num_frames, in_features = 3, 1024, 64
features = torch.randn(batch_size, num_frames, in_features, device=device, dtype=dtype)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
device=device,
)
model(features, lengths)
@parameterized.expand([(torch.float32,), (torch.float64,)])
def test_cpu_smoke_test(self, dtype):
model = conformer_wav2vec2_base()
self._smoke_test(model, torch.device("cpu"), dtype)
@parameterized.expand([(torch.float32,), (torch.float64,)])
@skipIfNoCuda
def test_cuda_smoke_test(self, dtype):
model = conformer_wav2vec2_base()
self._smoke_test(model, torch.device("cuda"), dtype)
@nested_params(
[conformer_wav2vec2_pretrain_base, conformer_wav2vec2_pretrain_large],
[torch.float32, torch.float64],
)
def test_pretrain_cpu_smoke_test(self, model, dtype):
model = model()
self._smoke_test(model, torch.device("cpu"), dtype)
@nested_params(
[conformer_wav2vec2_pretrain_base, conformer_wav2vec2_pretrain_large],
[torch.float32, torch.float64],
)
@skipIfNoCuda
def test_pretrain_cuda_smoke_test(self, model, dtype):
model = model()
self._smoke_test(model, torch.device("cuda"), dtype)
def test_extract_feature(self):
model = conformer_wav2vec2_base()
model.eval()
batch_size, num_frames, in_features = 3, 1024, 64
num_layers = len(model.encoder.conformer)
features = torch.randn(batch_size, num_frames, in_features)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
all_features, lengths_ = model.extract_features(features, lengths, num_layers=None)
assert len(all_features) == num_layers
for feats in all_features:
assert feats.ndim == 3
assert feats.shape[0] == batch_size
assert lengths_.shape == torch.Size([batch_size])
for l in range(1, num_layers + 1):
feats, lengths_ = model.extract_features(features, lengths, num_layers=l)
assert len(feats) == l
for i in range(l):
self.assertEqual(all_features[i], feats[i])
assert lengths_.shape == torch.Size([batch_size])
def test_zero_length(self):
model = conformer_wav2vec2_base()
model.eval()
batch_size, num_frames, in_features = 3, 1024, 64
features = torch.randn(batch_size, num_frames, in_features)
input_lengths = torch.zeros(batch_size)
_, output_lengths = model(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
_, output_lengths = model.extract_features(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
def test_torchscript_consistency(self):
model = conformer_wav2vec2_base()
model.eval()
batch_size, num_frames, in_features = 3, 1024, 64
features = torch.randn(batch_size, num_frames, in_features)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
ref_out, ref_len = model(features, lengths)
scripted = torch_script(model)
hyp_out, hyp_len = scripted(features, lengths)
self.assertEqual(hyp_out, ref_out)
self.assertEqual(hyp_len, ref_len)
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
vis_backends = [dict(type='LocalVisBackend'), dict(type='WandBVisBackend')]
visualizer = dict(vis_backends=vis_backends)
# MMEngine supports the following two ways; users can choose
# whichever is more convenient.
# default_hooks = dict(checkpoint=dict(interval=4))
_base_.default_hooks.checkpoint.interval = 4
# train_cfg = dict(val_interval=2)
_base_.train_cfg.val_interval = 2
|
# TODO: Awaiting refactoring
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Set evaluation interval
evaluation = dict(interval=2)
# Set checkpoint interval
checkpoint_config = dict(interval=4)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='MMDetWandbHook',
init_kwargs={
'project': 'mmdetection',
'group': 'maskrcnn-r50-fpn-1x-coco'
},
interval=50,
log_checkpoint=True,
log_checkpoint_metadata=True,
num_eval_images=100)
])
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray import BaseDoc
from docarray.typing import AnyUrl
def test_set_any_url():
class MyDocument(BaseDoc):
any_url: AnyUrl
d = MyDocument(any_url="https://jina.ai")
assert isinstance(d.any_url, AnyUrl)
assert d.any_url == "https://jina.ai"
|
from docarray import BaseDoc
from docarray.typing import AnyUrl
def test_set_any_url():
class MyDocument(BaseDoc):
any_url: AnyUrl
d = MyDocument(any_url="https://jina.ai")
assert isinstance(d.any_url, AnyUrl)
assert d.any_url == "https://jina.ai"
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize
from keras.src.quantizers import get
from keras.src.quantizers import serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import fake_quant_with_min_max_vars
from keras.src.quantizers.quantizers import quantize_and_dequantize
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize
from keras.src.quantizers import get
from keras.src.quantizers import serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars_per_channel,
)
from keras.src.quantizers.quantizers import quantize_and_dequantize
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.39"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.38"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torchaudio
sox_ext = torchaudio._extension.lazy_import_sox_ext()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_seed(seed)
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_verbosity(verbosity)
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_buffer_size(buffer_size)
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_use_threads(use_threads)
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(sox_ext.list_effects())
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_read_formats()
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_write_formats()
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return sox_ext.get_buffer_size()
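# A minimal usage sketch (hedged: assumes torchaudio was built with libsox
# support so the sox extension can be loaded lazily):
#
#   from torchaudio.utils import sox_utils
#   sox_utils.set_verbosity(1)                   # only report failures
#   sox_utils.set_buffer_size(131072)            # 128 KiB processing buffers
#   assert sox_utils.get_buffer_size() == 131072
#   print(sorted(sox_utils.list_effects())[:5])  # a few available effect names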
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torchaudio
@torchaudio._extension.fail_if_no_sox
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
torchaudio.lib._torchaudio_sox.set_seed(seed)
@torchaudio._extension.fail_if_no_sox
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
torchaudio.lib._torchaudio_sox.set_verbosity(verbosity)
@torchaudio._extension.fail_if_no_sox
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
torchaudio.lib._torchaudio_sox.set_buffer_size(buffer_size)
@torchaudio._extension.fail_if_no_sox
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
torchaudio.lib._torchaudio_sox.set_use_threads(use_threads)
@torchaudio._extension.fail_if_no_sox
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(torchaudio.lib._torchaudio_sox.list_effects())
@torchaudio._extension.fail_if_no_sox
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return torchaudio.lib._torchaudio_sox.list_read_formats()
@torchaudio._extension.fail_if_no_sox
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return torchaudio.lib._torchaudio_sox.list_write_formats()
@torchaudio._extension.fail_if_no_sox
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return torchaudio.lib._torchaudio_sox.get_buffer_size()
|
from langchain_core.load.dump import default, dumpd, dumps
__all__ = ["default", "dumpd", "dumps"]
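# A minimal usage sketch (hedged: `HumanMessage` is only an example of a
# serializable object; any langchain `Serializable` works the same way):
#
#   from langchain_core.messages import HumanMessage
#   msg = HumanMessage(content="hello")
#   dumps(msg, pretty=True)   # JSON string representation
#   dumpd(msg)                # plain-dict representation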
|
from langchain_core.load.dump import default, dumpd, dumps
__all__ = ["default", "dumps", "dumpd"]
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
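# A minimal usage sketch (hedged: FastAPI's TestClient does not run the
# lifespan hooks unless used as a context manager, so no database is needed
# to exercise the health endpoint):
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   assert client.get("/health").json() == {"status": "healthy"}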
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import unittest
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import skipIfNoRIR, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum", torch.float64))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.frequency_impulse_response, (mags,))
class TorchScriptConsistencyCPUOnlyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@skipIfNoRIR
@parameterized.expand([(1,), (4,)])
def test_simulate_rir_ism_single_band(self, channel):
room_dim = torch.rand(3, dtype=self.dtype, device=self.device) + 5
mic_array = torch.rand(channel, 3, dtype=self.dtype, device=self.device) + 1
source = torch.rand(3, dtype=self.dtype, device=self.device) + 4
max_order = 3
absorption = 0.5
center_frequency = torch.tensor([125, 250, 500, 1000, 2000, 4000, 8000], dtype=self.dtype, device=self.device)
self._assert_consistency(
F.simulate_rir_ism,
(room_dim, source, mic_array, max_order, absorption, None, 81, center_frequency, 343.0, 16000.0),
)
@skipIfNoRIR
@parameterized.expand([(1,), (4,)])
def test_simulate_rir_ism_multi_band(self, channel):
room_dim = torch.rand(3, dtype=self.dtype, device=self.device) + 5
mic_array = torch.rand(channel, 3, dtype=self.dtype, device=self.device) + 1
source = torch.rand(3, dtype=self.dtype, device=self.device) + 4
max_order = 3
absorption = torch.rand(7, 6, dtype=self.dtype, device=self.device)
center_frequency = torch.tensor([125, 250, 500, 1000, 2000, 4000, 8000], dtype=self.dtype, device=self.device)
self._assert_consistency(
F.simulate_rir_ism,
(room_dim, source, mic_array, max_order, absorption, None, 81, center_frequency, 343.0, 16000.0),
)
|
import unittest
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import skipIfNoRIR, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.frequency_impulse_response, (mags,))
class TorchScriptConsistencyCPUOnlyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@skipIfNoRIR
@parameterized.expand([(1,), (4,)])
def test_simulate_rir_ism_single_band(self, channel):
room_dim = torch.rand(3, dtype=self.dtype, device=self.device) + 5
mic_array = torch.rand(channel, 3, dtype=self.dtype, device=self.device) + 1
source = torch.rand(3, dtype=self.dtype, device=self.device) + 4
max_order = 3
absorption = 0.5
center_frequency = torch.tensor([125, 250, 500, 1000, 2000, 4000, 8000], dtype=self.dtype, device=self.device)
self._assert_consistency(
F.simulate_rir_ism,
(room_dim, source, mic_array, max_order, absorption, None, 81, center_frequency, 343.0, 16000.0),
)
@skipIfNoRIR
@parameterized.expand([(1,), (4,)])
def test_simulate_rir_ism_multi_band(self, channel):
room_dim = torch.rand(3, dtype=self.dtype, device=self.device) + 5
mic_array = torch.rand(channel, 3, dtype=self.dtype, device=self.device) + 1
source = torch.rand(3, dtype=self.dtype, device=self.device) + 4
max_order = 3
absorption = torch.rand(7, 6, dtype=self.dtype, device=self.device)
center_frequency = torch.tensor([125, 250, 500, 1000, 2000, 4000, 8000], dtype=self.dtype, device=self.device)
self._assert_consistency(
F.simulate_rir_ism,
(room_dim, source, mic_array, max_order, absorption, None, 81, center_frequency, 343.0, 16000.0),
)
|
# This file should NEVER be packaged! This is a hack to make "import keras" from
# the base of the repo just import the source files. We'll keep it for compat.
import os # isort: skip
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
from keras.api import * # noqa: F403, E402
from keras.api import __version__ # noqa: E402
# Don't pollute namespace.
del os
|
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import RematScope
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import SymbolicScope
from keras.api import Variable
from keras.api import __version__
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import remat
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
from keras.api import visualization
from keras.api import wrappers
# END DO NOT EDIT.
import os # isort: skip
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SlackGetMessage
from langchain_community.tools.slack.get_message import SlackGetMessageSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SlackGetMessageSchema": "langchain_community.tools.slack.get_message",
"SlackGetMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SlackGetMessage",
"SlackGetMessageSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SlackGetMessage
from langchain_community.tools.slack.get_message import SlackGetMessageSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SlackGetMessageSchema": "langchain_community.tools.slack.get_message",
"SlackGetMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SlackGetMessageSchema",
"SlackGetMessage",
]
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Fix fork error on macOS (seems to have no effect?); the variable must also be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# Do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.3'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Fix fork error on macOS (seems to have no effect?); the variable must also be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# Do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.6.1,<0.7",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
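# Illustrative note (assumption about downstream use): keys are bare package
# names and values are full version specifiers, so a runtime check can compare
# the installed version against, e.g., deps["torch"] == "torch>=2.1".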
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm": "kenlm",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"pandas": "pandas<2.3.0",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
"opentelemetry-api": "opentelemetry-api",
"opentelemetry-exporter-otlp": "opentelemetry-exporter-otlp",
"opentelemetry-sdk": "opentelemetry-sdk",
}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
build_detector, build_head, build_loss, build_neck,
build_roi_extractor, build_shared_head)
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .plugins import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
'build_shared_head', 'build_head', 'build_loss', 'build_detector'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
build_detector, build_head, build_loss, build_neck,
build_roi_extractor, build_shared_head)
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .plugins import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
'build_shared_head', 'build_head', 'build_loss', 'build_detector'
]
|
import sys
from absl import logging
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export(
[
"keras.config.enable_interactive_logging",
"keras.utils.enable_interactive_logging",
]
)
def enable_interactive_logging():
"""Turn on interactive logging.
When interactive logging is enabled, Keras displays logs via stdout.
This provides the best experience when using Keras in an interactive
environment such as a shell or a notebook.
"""
global_state.set_global_attribute("interactive_logging", True)
@keras_export(
[
"keras.config.disable_interactive_logging",
"keras.utils.disable_interactive_logging",
]
)
def disable_interactive_logging():
"""Turn off interactive logging.
When interactive logging is disabled, Keras sends logs to `absl.logging`.
This is the best option when using Keras in a non-interactive
way, such as running a training or inference job on a server.
"""
global_state.set_global_attribute("interactive_logging", False)
@keras_export(
[
"keras.config.is_interactive_logging_enabled",
"keras.utils.is_interactive_logging_enabled",
]
)
def is_interactive_logging_enabled():
"""Check if interactive logging is enabled.
To switch between writing logs to stdout and `absl.logging`, you may use
`keras.config.enable_interactive_logging()` and
`keras.config.disable_interactive_logging()`.
Returns:
Boolean, `True` if interactive logging is enabled,
and `False` otherwise.
"""
return global_state.get_global_attribute("interactive_logging", True)
def set_logging_verbosity(level):
"""Sets the verbosity level for logging.
Supported log levels are as follows:
- `"FATAL"` (least verbose)
- `"ERROR"`
- `"WARNING"`
- `"INFO"`
- `"DEBUG"` (most verbose)
Args:
level: A string corresponding to the level of verbosity for logging.
"""
valid_levels = {
"FATAL": logging.FATAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
}
verbosity = valid_levels.get(level)
if verbosity is None:
raise ValueError(
"Please pass a valid level for logging verbosity. "
f"Expected one of: {set(valid_levels.keys())}. "
f"Received: {level}"
)
logging.set_verbosity(verbosity)
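# Usage sketch (illustrative, not part of this module): based on the exports
# declared above, a non-interactive job could route Keras logs to absl and
# reduce verbosity roughly like this:
#
#     import keras
#     keras.config.disable_interactive_logging()  # logs now go to absl.logging
#     assert not keras.config.is_interactive_logging_enabled()
#
# Calling `set_logging_verbosity("WARNING")` on this module then limits absl
# output to WARNING and above.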
def print_msg(message, line_break=True):
"""Print the message to absl logging or stdout."""
message = str(message)
if is_interactive_logging_enabled():
message = message + "\n" if line_break else message
try:
sys.stdout.write(message)
except UnicodeEncodeError:
# If the encoding differs from UTF-8, `sys.stdout.write` may fail.
# To address this, replace special unicode characters in the
# message, and then encode and decode using the target encoding.
message = _replace_special_unicode_character(message)
# Fallback to UTF-8 when `sys.stdout.encoding` is `None` (e.g. when
# stdout is redirected). This prevents a `TypeError` that would be
            # raised by `str.encode(None)` / `bytes.decode(None)`.
encoding = sys.stdout.encoding or "utf-8"
message_bytes = message.encode(encoding, errors="ignore")
message = message_bytes.decode(encoding)
sys.stdout.write(message)
sys.stdout.flush()
else:
logging.info(message)
def ask_to_proceed_with_overwrite(filepath):
"""Produces a prompt asking about overwriting a file.
Args:
filepath: the path to the file to be overwritten.
Returns:
True if we can proceed with overwrite, False otherwise.
"""
overwrite = (
input(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
.strip()
.lower()
)
while overwrite not in ("y", "n"):
overwrite = (
input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
)
if overwrite == "n":
return False
print_msg("[TIP] Next time specify overwrite=True!")
return True
def _replace_special_unicode_character(message):
message = str(message).replace("━", "=") # Fall back to Keras2 behavior.
return message
|
import sys
from absl import logging
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export(
[
"keras.config.enable_interactive_logging",
"keras.utils.enable_interactive_logging",
]
)
def enable_interactive_logging():
"""Turn on interactive logging.
When interactive logging is enabled, Keras displays logs via stdout.
This provides the best experience when using Keras in an interactive
environment such as a shell or a notebook.
"""
global_state.set_global_attribute("interactive_logging", True)
@keras_export(
[
"keras.config.disable_interactive_logging",
"keras.utils.disable_interactive_logging",
]
)
def disable_interactive_logging():
"""Turn off interactive logging.
When interactive logging is disabled, Keras sends logs to `absl.logging`.
This is the best option when using Keras in a non-interactive
way, such as running a training or inference job on a server.
"""
global_state.set_global_attribute("interactive_logging", False)
@keras_export(
[
"keras.config.is_interactive_logging_enabled",
"keras.utils.is_interactive_logging_enabled",
]
)
def is_interactive_logging_enabled():
"""Check if interactive logging is enabled.
To switch between writing logs to stdout and `absl.logging`, you may use
`keras.config.enable_interactive_logging()` and
`keras.config.disable_interactive_logging()`.
Returns:
Boolean, `True` if interactive logging is enabled,
and `False` otherwise.
"""
return global_state.get_global_attribute("interactive_logging", True)
def set_logging_verbosity(level):
"""Sets the verbosity level for logging.
Supported log levels are as follows:
- `"FATAL"` (least verbose)
- `"ERROR"`
- `"WARNING"`
- `"INFO"`
- `"DEBUG"` (most verbose)
Args:
level: A string corresponding to the level of verbosity for logging.
"""
valid_levels = {
"FATAL": logging.FATAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
}
verbosity = valid_levels.get(level)
if verbosity is None:
raise ValueError(
"Please pass a valid level for logging verbosity. "
f"Expected one of: {set(valid_levels.keys())}. "
f"Received: {level}"
)
logging.set_verbosity(verbosity)
def print_msg(message, line_break=True):
"""Print the message to absl logging or stdout."""
message = str(message)
if is_interactive_logging_enabled():
message = message + "\n" if line_break else message
try:
sys.stdout.write(message)
except UnicodeEncodeError:
# If the encoding differs from UTF-8, `sys.stdout.write` may fail.
# To address this, replace special unicode characters in the
# message, and then encode and decode using the target encoding.
message = _replace_special_unicode_character(message)
message_bytes = message.encode(sys.stdout.encoding, errors="ignore")
message = message_bytes.decode(sys.stdout.encoding)
sys.stdout.write(message)
sys.stdout.flush()
else:
logging.info(message)
def ask_to_proceed_with_overwrite(filepath):
"""Produces a prompt asking about overwriting a file.
Args:
filepath: the path to the file to be overwritten.
Returns:
True if we can proceed with overwrite, False otherwise.
"""
overwrite = (
input(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
.strip()
.lower()
)
while overwrite not in ("y", "n"):
overwrite = (
input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
)
if overwrite == "n":
return False
print_msg("[TIP] Next time specify overwrite=True!")
return True
def _replace_special_unicode_character(message):
message = str(message).replace("━", "=") # Fall back to Keras2 behavior.
return message
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {
"model": "accounts/fireworks/models/llama-v3p1-70b-instruct",
"temperature": 0,
}
@pytest.mark.xfail(reason="Not yet implemented.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@property
def supports_json_mode(self) -> bool:
return True
|
"""Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {
"model": "accounts/fireworks/models/llama-v3p1-70b-instruct",
"temperature": 0,
}
@pytest.mark.xfail(reason="Not yet implemented.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@property
def supports_json_mode(self) -> bool:
return True
|
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Image
from datasets.packaged_modules.text.text import Text
from ..utils import require_pil
@pytest.fixture
def text_file(tmp_path):
filename = tmp_path / "text.txt"
data = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Second paragraph:
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
)
with open(filename, "w", encoding="utf-8") as f:
f.write(data)
return str(filename)
@pytest.fixture
def text_file_with_image(tmp_path, image_file):
filename = tmp_path / "text_with_image.txt"
with open(filename, "w", encoding="utf-8") as f:
f.write(image_file)
return str(filename)
@pytest.mark.parametrize("keep_linebreaks", [True, False])
def test_text_linebreaks(text_file, keep_linebreaks):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read().splitlines(keepends=keep_linebreaks)
text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8")
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
@require_pil
def test_text_cast_image(text_file_with_image):
with open(text_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[0]
text = Text(encoding="utf-8", features=Features({"image": Image()}))
generator = text._generate_tables([[text_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
@pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"])
def test_text_sample_by(sample_by, text_file):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read()
if sample_by == "line":
expected_content = expected_content.splitlines()
elif sample_by == "paragraph":
expected_content = expected_content.split("\n\n")
elif sample_by == "document":
expected_content = [expected_content]
text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100)
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
|
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Image
from datasets.packaged_modules.text.text import Text
from ..utils import require_pil
@pytest.fixture
def text_file(tmp_path):
filename = tmp_path / "text.txt"
data = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
)
with open(filename, "w", encoding="utf-8") as f:
f.write(data)
return str(filename)
@pytest.fixture
def text_file_with_image(tmp_path, image_file):
filename = tmp_path / "text_with_image.txt"
with open(filename, "w", encoding="utf-8") as f:
f.write(image_file)
return str(filename)
@pytest.mark.parametrize("keep_linebreaks", [True, False])
def test_text_linebreaks(text_file, keep_linebreaks):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read().splitlines(keepends=keep_linebreaks)
text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8")
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
@require_pil
def test_text_cast_image(text_file_with_image):
with open(text_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[0]
text = Text(encoding="utf-8", features=Features({"image": Image()}))
generator = text._generate_tables([[text_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
require_read_token,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester
if is_torch_available():
import torch
from transformers import (
HeliumForCausalLM,
HeliumForSequenceClassification,
HeliumForTokenClassification,
HeliumModel,
)
class HeliumModelTester(GemmaModelTester):
if is_torch_available():
config_class = HeliumConfig
model_class = HeliumModel
for_causal_lm_class = HeliumForCausalLM
for_sequence_class = HeliumForSequenceClassification
for_token_class = HeliumForTokenClassification
@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
all_model_classes = (
(HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": HeliumModel,
"text-classification": HeliumForSequenceClassification,
"token-classification": HeliumForTokenClassification,
"text-generation": HeliumForCausalLM,
"zero-shot": HeliumForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = HeliumModelTester(self)
self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)
@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
input_text = ["Hello, today is a great day to"]
@require_read_token
def test_model_2b(self):
model_id = "kyutai/helium-1-preview"
EXPECTED_TEXTS = [
"Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"
]
model = AutoModelForCausalLM.from_pretrained(
model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, revision="refs/pr/1"
).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
require_read_token,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester
if is_torch_available():
import torch
from transformers import (
HeliumForCausalLM,
HeliumForSequenceClassification,
HeliumForTokenClassification,
HeliumModel,
)
class HeliumModelTester(GemmaModelTester):
if is_torch_available():
config_class = HeliumConfig
model_class = HeliumModel
for_causal_lm_class = HeliumForCausalLM
for_sequence_class = HeliumForSequenceClassification
for_token_class = HeliumForTokenClassification
@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
all_model_classes = (
(HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": HeliumModel,
"text-classification": HeliumForSequenceClassification,
"token-classification": HeliumForTokenClassification,
"text-generation": HeliumForCausalLM,
"zero-shot": HeliumForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = HeliumModelTester(self)
self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)
@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
input_text = ["Hello, today is a great day to"]
    # This variable is used to determine which CUDA device we are using for our runners (A10 or T4)
# Depending on the hardware we get different logits / generations
cuda_compute_capability_major_version = None
@classmethod
def setUpClass(cls):
if is_torch_available() and torch.cuda.is_available():
# 8 is for A100 / A10 and 7 for T4
cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]
@require_read_token
def test_model_2b(self):
model_id = "kyutai/helium-1-preview"
EXPECTED_TEXTS = [
"Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"
]
model = AutoModelForCausalLM.from_pretrained(
model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, revision="refs/pr/1"
).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
|
from langchain_core.agents import AgentAction
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
def format_log_to_messages(
intermediate_steps: list[tuple[AgentAction, str]],
template_tool_response: str = "{observation}",
) -> list[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
template_tool_response: Template to format the observation with.
Defaults to "{observation}".
Returns:
List[BaseMessage]: The scratchpad.
"""
thoughts: list[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(
content=template_tool_response.format(observation=observation),
)
thoughts.append(human_message)
return thoughts
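# Usage sketch (hypothetical values, for illustration only): each intermediate
# step becomes an AIMessage carrying the agent's log, followed by a
# HumanMessage carrying the formatted observation.
#
#     steps = [
#         (
#             AgentAction(tool="search", tool_input="weather", log="I should search."),
#             "Sunny, 25C",
#         )
#     ]
#     scratchpad = format_log_to_messages(
#         steps, template_tool_response="Tool output: {observation}"
#     )
#     # -> [AIMessage("I should search."), HumanMessage("Tool output: Sunny, 25C")]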
|
from langchain_core.agents import AgentAction
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
def format_log_to_messages(
intermediate_steps: list[tuple[AgentAction, str]],
template_tool_response: str = "{observation}",
) -> list[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
template_tool_response: Template to format the observation with.
Defaults to "{observation}".
Returns:
List[BaseMessage]: The scratchpad.
"""
thoughts: list[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(
content=template_tool_response.format(observation=observation)
)
thoughts.append(human_message)
return thoughts
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from mmdet.registry import MODELS
from ...core.utils import flip_tensor
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CenterNet(SingleStageDetector):
"""Implementation of CenterNet(Objects as Points)
<https://arxiv.org/abs/1904.07850>.
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def merge_aug_results(self, aug_results, with_nms):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
            with_nms (bool): If True, do NMS before returning boxes.
Returns:
tuple: (out_bboxes, out_labels)
"""
recovered_bboxes, aug_labels = [], []
for single_result in aug_results:
recovered_bboxes.append(single_result[0][0])
aug_labels.append(single_result[0][1])
bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()
labels = torch.cat(aug_labels).contiguous()
if with_nms:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=True):
"""Augment testing of CenterNet. Aug test must have flipped image pair,
and unlike CornerNet, it will perform an averaging operation on the
feature map instead of detecting bbox.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: True.
Note:
            ``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
flip_direction = img_metas[flip_ind][0]['flip_direction']
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x)
assert len(center_heatmap_preds) == len(wh_preds) == len(
offset_preds) == 1
# Feature map averaging
center_heatmap_preds[0] = (
center_heatmap_preds[0][0:1] +
flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2
wh_preds[0] = (wh_preds[0][0:1] +
flip_tensor(wh_preds[0][1:2], flip_direction)) / 2
bbox_list = self.bbox_head.get_bboxes(
center_heatmap_preds,
wh_preds, [offset_preds[0][0:1]],
img_metas[ind],
rescale=rescale,
with_nms=False)
aug_results.append(bbox_list)
nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)
if nms_cfg is None:
with_nms = False
else:
with_nms = True
bbox_list = [self.merge_aug_results(aug_results, with_nms)]
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results
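# Illustrative sketch (not part of the model code): the feature-map averaging
# above combines each prediction with its flipped counterpart, flipping the
# latter back before taking the mean, e.g. for the center heatmap:
#
#     avg = (heatmap_orig + flip_tensor(heatmap_flipped, flip_direction)) / 2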
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from mmdet.models.builder import DETECTORS
from ...core.utils import flip_tensor
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CenterNet(SingleStageDetector):
"""Implementation of CenterNet(Objects as Points)
<https://arxiv.org/abs/1904.07850>.
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def merge_aug_results(self, aug_results, with_nms):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
            with_nms (bool): If True, do NMS before returning boxes.
Returns:
tuple: (out_bboxes, out_labels)
"""
recovered_bboxes, aug_labels = [], []
for single_result in aug_results:
recovered_bboxes.append(single_result[0][0])
aug_labels.append(single_result[0][1])
bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()
labels = torch.cat(aug_labels).contiguous()
if with_nms:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=True):
"""Augment testing of CenterNet. Aug test must have flipped image pair,
and unlike CornerNet, it will perform an averaging operation on the
feature map instead of detecting bbox.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: True.
Note:
            ``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
flip_direction = img_metas[flip_ind][0]['flip_direction']
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x)
assert len(center_heatmap_preds) == len(wh_preds) == len(
offset_preds) == 1
# Feature map averaging
center_heatmap_preds[0] = (
center_heatmap_preds[0][0:1] +
flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2
wh_preds[0] = (wh_preds[0][0:1] +
flip_tensor(wh_preds[0][1:2], flip_direction)) / 2
bbox_list = self.bbox_head.get_bboxes(
center_heatmap_preds,
wh_preds, [offset_preds[0][0:1]],
img_metas[ind],
rescale=rescale,
with_nms=False)
aug_results.append(bbox_list)
nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)
if nms_cfg is None:
with_nms = False
else:
with_nms = True
bbox_list = [self.merge_aug_results(aug_results, with_nms)]
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
"""Init file."""
from llama_index.readers.docstring_walker.base import DocstringWalker
__all__ = ["DocstringWalker"]
|
"""Init file."""
from llama_index.readers.docstring_walker.base import DocstringWalker
__all__ = ["DocstringWalker"]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as serialize_keras_object,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import deserialize_keras_object
from keras.src.legacy.saving.serialization import serialize_keras_object
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
import warnings
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Defaults to True
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print_log(
f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ",
logger='current')
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print_log(
f'Finish registry analysis, got: {scan_data}', logger='current')
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print_log(f'Result has been saved to {json_path}', logger='current')
return scan_data
def init_default_scope(scope: str) -> None:
"""Initialize the given default scope.
Args:
scope (str): The name of the default scope.
"""
never_created = DefaultScope.get_current_instance(
) is None or not DefaultScope.check_instance_created(scope)
if never_created:
DefaultScope.get_instance(scope, scope_name=scope)
return
current_scope = DefaultScope.get_current_instance() # type: ignore
if current_scope.scope_name != scope: # type: ignore
warnings.warn('The current default scope ' # type: ignore
f'"{current_scope.scope_name}" is not "{scope}", '
                      '`init_default_scope` will forcibly set the current '
f'default scope to "{scope}".')
# avoid name conflict
new_instance_name = f'{scope}-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name=scope)
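# Usage sketch (illustrative): callers typically initialize the default scope
# once at program entry so registry lookups resolve within their own package:
#
#     init_default_scope('mmengine')
#     assert DefaultScope.get_current_instance().scope_name == 'mmengine'
#
# A later call with a different scope emits the warning above and switches the
# default scope under a fresh instance name to avoid a name conflict.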
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
import warnings
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Default: True
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print_log(
f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ",
logger='current')
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print_log(
f'Finish registry analysis, got: {scan_data}', logger='current')
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print_log(f'Result has been saved to {json_path}', logger='current')
return scan_data
def init_default_scope(scope: str) -> None:
"""Initialize the given default scope.
Args:
scope (str): The name of the default scope.
"""
never_created = DefaultScope.get_current_instance(
) is None or not DefaultScope.check_instance_created(scope)
if never_created:
DefaultScope.get_instance(scope, scope_name=scope)
return
current_scope = DefaultScope.get_current_instance() # type: ignore
if current_scope.scope_name != scope: # type: ignore
warnings.warn('The current default scope ' # type: ignore
f'"{current_scope.scope_name}" is not "{scope}", '
'`init_default_scope` will force set the current'
f'default scope to "{scope}".')
# avoid name conflict
new_instance_name = f'{scope}-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name=scope)
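# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the helpers above fit together: switch the
# default scope, then collect the registry statistics without writing them to
# disk. Runs only as a script; everything used here is defined above.
if __name__ == '__main__':
    init_default_scope('mmengine')
    stats = count_registered_modules(save_path=None, verbose=False)
    print(f"scanned {len(stats['registries'])} root registries "
          f"at {stats['scan_date']}")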
|
from __future__ import annotations
import logging
import numpy as np
from torch.utils.data import IterableDataset
from sentence_transformers.readers import InputExample
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS which requires
multiple examples with the same label in a batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
    This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
    This *DOES NOT* check whether there are more labels than the batch size, or whether the batch size is divisible
    by the number of samples drawn per label.
"""
def __init__(self, examples: list[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
Args:
examples (List[InputExample]): A list of InputExamples.
samples_per_label (int, optional): The number of consecutive, random, and unique samples drawn per label.
The batch size should be a multiple of samples_per_label. Defaults to 2.
            with_replacement (bool, optional): If True, a sample can be drawn in multiple draws (though never more
                than once within the same draw). If False, each sample is drawn at most once (depending on the
                total number of samples per label). Defaults to False.
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
        # Include only labels with at least samples_per_label examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
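# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the sampling behaviour described in the class
# docstring: two labels with two examples each, drawn in groups of
# samples_per_label. The texts and labels are made up for the example.
if __name__ == "__main__":
    examples = [
        InputExample(texts=["A dog runs."], label=0),
        InputExample(texts=["A puppy plays."], label=0),
        InputExample(texts=["Stocks fell today."], label=1),
        InputExample(texts=["Markets dropped sharply."], label=1),
    ]
    dataset = SentenceLabelDataset(examples, samples_per_label=2)
    for example in dataset:
        print(example.label, example.texts[0])  # samples arrive label by label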
|
import logging
from typing import List
import numpy as np
from torch.utils.data import IterableDataset
from sentence_transformers.readers import InputExample
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS which requires
multiple examples with the same label in a batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
    This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
    This *DOES NOT* check whether there are more labels than the batch size, or whether the batch size is divisible
    by the number of samples drawn per label.
"""
def __init__(self, examples: List[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
Args:
examples (List[InputExample]): A list of InputExamples.
samples_per_label (int, optional): The number of consecutive, random, and unique samples drawn per label.
The batch size should be a multiple of samples_per_label. Defaults to 2.
            with_replacement (bool, optional): If True, a sample can be drawn in multiple draws (though never more
                than once within the same draw). If False, each sample is drawn at most once (depending on the
                total number of samples per label). Defaults to False.
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
        # Include only labels with at least samples_per_label examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
|
import pytest
import torch
from mmengine.structures import InstanceData
from mmdet.models.utils import empty_instances, unpack_gt_instances
from mmdet.testing import demo_mm_inputs
def test_parse_gt_instance_info():
packed_inputs = demo_mm_inputs()['data_samples']
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= unpack_gt_instances(packed_inputs)
assert len(batch_gt_instances) == len(packed_inputs)
assert len(batch_gt_instances_ignore) == len(packed_inputs)
assert len(batch_img_metas) == len(packed_inputs)
def test_process_empty_roi():
batch_size = 2
batch_img_metas = [{'ori_shape': (10, 12)}] * batch_size
device = torch.device('cpu')
results_list = empty_instances(batch_img_metas, device, task_type='bbox')
assert len(results_list) == batch_size
for results in results_list:
assert isinstance(results, InstanceData)
assert len(results) == 0
assert torch.allclose(results.bboxes, torch.zeros(0, 4, device=device))
results_list = empty_instances(
batch_img_metas,
device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=0.5)
assert len(results_list) == batch_size
for results in results_list:
assert isinstance(results, InstanceData)
assert len(results) == 0
assert results.masks.shape == (0, 10, 12)
# batch_img_metas and instance_results length must be the same
with pytest.raises(AssertionError):
empty_instances(
batch_img_metas,
device,
task_type='mask',
instance_results=[results_list[0]] * 3)
|
import pytest
import torch
from mmengine.data import InstanceData
from mmdet.models.utils import empty_instances, unpack_gt_instances
from mmdet.testing import demo_mm_inputs
def test_parse_gt_instance_info():
packed_inputs = demo_mm_inputs()
batch_data_samples = []
for inputs in packed_inputs:
batch_data_samples.append(inputs['data_sample'])
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= unpack_gt_instances(batch_data_samples)
assert len(batch_gt_instances) == len(packed_inputs)
assert len(batch_gt_instances_ignore) == len(packed_inputs)
assert len(batch_img_metas) == len(packed_inputs)
def test_process_empty_roi():
batch_size = 2
batch_img_metas = [{'ori_shape': (10, 12)}] * batch_size
device = torch.device('cpu')
results_list = empty_instances(batch_img_metas, device, task_type='bbox')
assert len(results_list) == batch_size
for results in results_list:
assert isinstance(results, InstanceData)
assert len(results) == 0
assert torch.allclose(results.bboxes, torch.zeros(0, 4, device=device))
results_list = empty_instances(
batch_img_metas,
device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=0.5)
assert len(results_list) == batch_size
for results in results_list:
assert isinstance(results, InstanceData)
assert len(results) == 0
assert results.masks.shape == (0, 10, 12)
# batch_img_metas and instance_results length must be the same
with pytest.raises(AssertionError):
empty_instances(
batch_img_metas,
device,
task_type='mask',
instance_results=[results_list[0]] * 3)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
from .panoptic_evaluation import pq_compute_multi_core, pq_compute_single_core
__all__ = [
'COCO', 'COCOeval', 'pq_compute_multi_core', 'pq_compute_single_core',
'COCOPanoptic'
]
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "country211"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Country211(Dataset):
"""
- **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self._split_folder_name = "valid" if split == "val" else split
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._split_folder_name))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 31_650,
"val": 10_550,
"test": 21_100,
}[self._split]
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import EncodedImage, Label
from .._api import register_dataset, register_info
NAME = "country211"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Country211(Dataset):
"""
- **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self._split_folder_name = "valid" if split == "val" else split
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._split_folder_name))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 31_650,
"val": 10_550,
"test": 21_100,
}[self._split]
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
|
class AudioMetaData:
"""Return type of ``torchaudio.info`` function.
This class is used by :py:mod:`"sox_io" backend<torchaudio.backends.sox_io_backend>` and
:py:mod:`"soundfile" backend<torchaudio.backends.soundfile_backend>`.
:ivar int sample_rate: Sample rate
:ivar int num_frames: The number of frames
:ivar int num_channels: The number of channels
:ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,
or when it cannot be accurately inferred.
:ivar str encoding: Audio encoding
The values encoding can take are one of the following:
* ``PCM_S``: Signed integer linear PCM
* ``PCM_U``: Unsigned integer linear PCM
* ``PCM_F``: Floating point linear PCM
* ``FLAC``: Flac, Free Lossless Audio Codec
* ``ULAW``: Mu-law
* ``ALAW``: A-law
* ``MP3`` : MP3, MPEG-1 Audio Layer III
* ``VORBIS``: OGG Vorbis
* ``AMR_WB``: Adaptive Multi-Rate Wideband
* ``AMR_NB``: Adaptive Multi-Rate Narrowband
* ``OPUS``: Opus
* ``HTK``: Single channel 16-bit PCM
    * ``UNKNOWN`` : None of the above
"""
def __init__(
self,
sample_rate: int,
num_frames: int,
num_channels: int,
bits_per_sample: int,
encoding: str,
):
self.sample_rate = sample_rate
self.num_frames = num_frames
self.num_channels = num_channels
self.bits_per_sample = bits_per_sample
self.encoding = encoding
def __str__(self):
return (
f"AudioMetaData("
f"sample_rate={self.sample_rate}, "
f"num_frames={self.num_frames}, "
f"num_channels={self.num_channels}, "
f"bits_per_sample={self.bits_per_sample}, "
f"encoding={self.encoding}"
f")"
)
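# --- Hedged usage sketch (not part of the original module) ---
# AudioMetaData is normally returned by ``torchaudio.info``; constructing one
# by hand here only illustrates the fields documented above. The values are
# arbitrary example numbers.
if __name__ == "__main__":
    meta = AudioMetaData(
        sample_rate=16000,
        num_frames=32000,  # two seconds at 16 kHz
        num_channels=1,
        bits_per_sample=16,
        encoding="PCM_S",
    )
    print(meta)  # formatted by __str__ above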
|
class AudioMetaData:
"""Return type of ``torchaudio.info`` function.
This class is used by :py:mod:`"sox_io" backend<torchaudio.backends.sox_io_backend>` and
:py:mod:`"soundfile" backend<torchaudio.backends.soundfile_backend>`.
:ivar int sample_rate: Sample rate
:ivar int num_frames: The number of frames
:ivar int num_channels: The number of channels
:ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,
or when it cannot be accurately inferred.
:ivar str encoding: Audio encoding
The values encoding can take are one of the following:
* ``PCM_S``: Signed integer linear PCM
* ``PCM_U``: Unsigned integer linear PCM
* ``PCM_F``: Floating point linear PCM
* ``FLAC``: Flac, Free Lossless Audio Codec
* ``ULAW``: Mu-law
* ``ALAW``: A-law
* ``MP3`` : MP3, MPEG-1 Audio Layer III
* ``VORBIS``: OGG Vorbis
* ``AMR_WB``: Adaptive Multi-Rate
* ``AMR_NB``: Adaptive Multi-Rate Wideband
* ``OPUS``: Opus
* ``HTK``: Single channel 16-bit PCM
    * ``UNKNOWN`` : None of the above
"""
def __init__(
self,
sample_rate: int,
num_frames: int,
num_channels: int,
bits_per_sample: int,
encoding: str,
):
self.sample_rate = sample_rate
self.num_frames = num_frames
self.num_channels = num_channels
self.bits_per_sample = bits_per_sample
self.encoding = encoding
def __str__(self):
return (
f"AudioMetaData("
f"sample_rate={self.sample_rate}, "
f"num_frames={self.num_frames}, "
f"num_channels={self.num_channels}, "
f"bits_per_sample={self.bits_per_sample}, "
f"encoding={self.encoding}"
f")"
)
|
"""
This example uses a simple bag-of-words (BoW) approach. A sentence is mapped
to a sparse vector with e.g. 25,000 dimensions. Optionally, you can also use tf-idf.
To make the model trainable, we add multiple dense layers to create a Deep Averaging Network (DAN).
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
from sentence_transformers.models.tokenizer.WordTokenizer import ENGLISH_STOP_WORDS
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_tf-idf_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
##### Construction of the SentenceTransformer Model #####
# Wikipedia document frequency for words
wiki_doc_freq = "wikipedia_doc_frequencies.txt"
if not os.path.exists(wiki_doc_freq):
util.http_get(
"https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/wikipedia_doc_frequencies.txt", wiki_doc_freq
)
# Create the vocab for the BoW model
stop_words = ENGLISH_STOP_WORDS
max_vocab_size = 25000 # This is also the size of the BoW sentence vector.
# Read the most common max_vocab_size words. Skip stop-words
vocab = set()
weights = {}
lines = open("wikipedia_doc_frequencies.txt", encoding="utf8").readlines()
num_docs = int(lines[0])
for line in lines[1:]:
word, freq = line.lower().strip().split("\t")
if word in stop_words:
continue
vocab.add(word)
weights[word] = math.log(num_docs / int(freq))
if len(vocab) >= max_vocab_size:
break
##### Construction of the SentenceTransformer Model #####
# Create the BoW model. Because we set word_weights to the IDF values and cumulative_term_frequency=True, we
# get tf-idf vectors. Set word_weights to an empty dict and cumulative_term_frequency=False to get a 1-hot sentence encoding
bow = models.BoW(vocab=vocab, word_weights=weights, cumulative_term_frequency=True)
# Add two trainable feed-forward networks (DAN) with max_vocab_size -> 768 -> 512 dimensions.
sent_embeddings_dimension = max_vocab_size
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=768)
dan2 = models.Dense(in_features=768, out_features=512)
model = SentenceTransformer(modules=[bow, dan1, dan2])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
|
"""
This example uses a simple bag-of-words (BoW) approach. A sentence is mapped
to a sparse vector with e.g. 25,000 dimensions. Optionally, you can also use tf-idf.
To make the model trainable, we add multiple dense layers to create a Deep Averaging Network (DAN).
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
from sentence_transformers.models.tokenizer.WordTokenizer import ENGLISH_STOP_WORDS
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_tf-idf_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
##### Construction of the SentenceTransformer Model #####
# Wikipedia document frequency for words
wiki_doc_freq = "wikipedia_doc_frequencies.txt"
if not os.path.exists(wiki_doc_freq):
util.http_get(
"https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/wikipedia_doc_frequencies.txt", wiki_doc_freq
)
# Create the vocab for the BoW model
stop_words = ENGLISH_STOP_WORDS
max_vocab_size = 25000 # This is also the size of the BoW sentence vector.
# Read the most common max_vocab_size words. Skip stop-words
vocab = set()
weights = {}
lines = open("wikipedia_doc_frequencies.txt", encoding="utf8").readlines()
num_docs = int(lines[0])
for line in lines[1:]:
word, freq = line.lower().strip().split("\t")
if word in stop_words:
continue
vocab.add(word)
weights[word] = math.log(num_docs / int(freq))
if len(vocab) >= max_vocab_size:
break
##### Construction of the SentenceTransformer Model #####
# Create the BoW model. Because we set word_weights to the IDF values and cumulative_term_frequency=True, we
# get tf-idf vectors. Set word_weights to an empty dict and cumulative_term_frequency=False to get a 1-hot sentence encoding
bow = models.BoW(vocab=vocab, word_weights=weights, cumulative_term_frequency=True)
# Add two trainable feed-forward networks (DAN) with max_vocab_size -> 768 -> 512 dimensions.
sent_embeddings_dimension = max_vocab_size
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=768)
dan2 = models.Dense(in_features=768, out_features=512)
model = SentenceTransformer(modules=[bow, dan1, dan2])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(evaluator)
|
_base_ = 'faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
max_iter = 90000
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=10000)
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
|
_base_ = 'faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn as nn
from torch import Tensor
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
        SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
Requirements:
            - Needs to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCosineSimilarityLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn as nn
from torch import Tensor
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
        SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
# TODO: Not yet for sparse might want it ?
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
            - Needs to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, SparseCoSENTLoss is recommended.
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than SparseCosineSimilarityLoss.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCosineSimilarityLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_color_space_image_tensor,
convert_color_space_image_pil,
convert_color_space_video,
convert_color_space,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import (
decode_image_with_pil,
decode_video_with_av,
pil_to_tensor,
to_image_pil,
to_image_tensor,
to_pil_image,
)
from ._deprecated import get_image_size, rgb_to_grayscale, to_grayscale, to_tensor # usort: skip
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_color_space_image_tensor,
convert_color_space_image_pil,
convert_color_space_video,
convert_color_space,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._type_conversion import (
decode_image_with_pil,
decode_video_with_av,
pil_to_tensor,
to_image_pil,
to_image_tensor,
to_pil_image,
)
from ._deprecated import get_image_size, rgb_to_grayscale, to_grayscale, to_tensor # usort: skip
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
'sphinx_tabs.tabs',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
myst_enable_extensions = ['colon_fence']
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
myst_enable_extensions = ['colon_fence']
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from docarray.document import DocumentSourceType
from docarray.document.mixins.content import DocumentContentType
from docarray import Document
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: Optional['GeneratorSourceType'] = None,
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
    :param data_type: if ``data`` is an iterator over self-contained documents, i.e. :class:`DocumentSourceType`;
        or an iterator over possible Document content (to be set as text, blob or buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
        # must be handled here, as the grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
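# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of how ``request_generator`` batches inputs: three
# Documents with ``request_size=2`` yield two requests. Requires jina and a
# compatible docarray at runtime; the '/index' endpoint name is only an example.
if __name__ == '__main__':
    from docarray import Document
    docs = [Document(text=t) for t in ('a', 'b', 'c')]
    requests = list(request_generator('/index', docs, request_size=2))
    print(len(requests))  # -> 2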
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from docarray.document import DocumentSourceType
from docarray.document.mixins.content import DocumentContentType
from docarray import Document
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
    :param data_type: if ``data`` is an iterator over self-contained documents, i.e. :class:`DocumentSourceType`;
        or an iterator over possible Document content (to be set as text, blob or buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
        # must be handled here, as the grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
"""This file only exists to be lazy-imported and avoid V2-related import warnings when just using V1."""
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
class PadIfSmaller(v2.Transform):
def __init__(self, size, fill=0):
super().__init__()
self.size = size
self.fill = v2._utils._setup_fill_arg(fill)
def _get_params(self, sample):
_, height, width = v2._utils.query_chw(sample)
padding = [0, 0, max(self.size - width, 0), max(self.size - height, 0)]
needs_padding = any(padding)
return dict(padding=padding, needs_padding=needs_padding)
def _transform(self, inpt, params):
if not params["needs_padding"]:
return inpt
fill = v2._utils._get_fill(self.fill, type(inpt))
fill = v2._utils._convert_fill_arg(fill)
return v2.functional.pad(inpt, padding=params["padding"], fill=fill)
class CocoDetectionToVOCSegmentation(v2.Transform):
"""Turn samples from datasets.CocoDetection into the same format as VOCSegmentation.
This is achieved in two steps:
1. COCO differentiates between 91 categories while VOC only supports 21, including background for both. Fortunately,
the COCO categories are a superset of the VOC ones and thus can be mapped. Instances of the 70 categories not
present in VOC are dropped and replaced by background.
2. COCO only offers detection masks, i.e. a (N, H, W) bool-ish tensor, where the truthy values in each individual
mask denote the instance. However, a segmentation mask is a (H, W) integer tensor (typically torch.uint8), where
the value of each pixel denotes the category it belongs to. The detection masks are merged into one segmentation
mask while pixels that belong to multiple detection masks are marked as invalid.
"""
COCO_TO_VOC_LABEL_MAP = dict(
zip(
[0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72],
range(21),
)
)
INVALID_VALUE = 255
def _coco_detection_masks_to_voc_segmentation_mask(self, target):
if "masks" not in target:
return None
instance_masks, instance_labels_coco = target["masks"], target["labels"]
valid_labels_voc = [
(idx, label_voc)
for idx, label_coco in enumerate(instance_labels_coco.tolist())
if (label_voc := self.COCO_TO_VOC_LABEL_MAP.get(label_coco)) is not None
]
if not valid_labels_voc:
return None
valid_voc_category_idcs, instance_labels_voc = zip(*valid_labels_voc)
instance_masks = instance_masks[list(valid_voc_category_idcs)].to(torch.uint8)
instance_labels_voc = torch.tensor(instance_labels_voc, dtype=torch.uint8)
# Calling `.max()` on the stacked detection masks works fine to separate background from foreground as long as
# there is at most a single instance per pixel. Overlapping instances will be filtered out in the next step.
segmentation_mask, _ = (instance_masks * instance_labels_voc.reshape(-1, 1, 1)).max(dim=0)
segmentation_mask[instance_masks.sum(dim=0) > 1] = self.INVALID_VALUE
return segmentation_mask
def forward(self, image, target):
segmentation_mask = self._coco_detection_masks_to_voc_segmentation_mask(target)
if segmentation_mask is None:
segmentation_mask = torch.zeros(v2.functional.get_size(image), dtype=torch.uint8)
return image, tv_tensors.Mask(segmentation_mask)
|
"""This file only exists to be lazy-imported and avoid V2-related import warnings when just using V1."""
import torch
from torchvision import datapoints
from torchvision.transforms import v2
class PadIfSmaller(v2.Transform):
def __init__(self, size, fill=0):
super().__init__()
self.size = size
self.fill = v2._utils._setup_fill_arg(fill)
def _get_params(self, sample):
_, height, width = v2._utils.query_chw(sample)
padding = [0, 0, max(self.size - width, 0), max(self.size - height, 0)]
needs_padding = any(padding)
return dict(padding=padding, needs_padding=needs_padding)
def _transform(self, inpt, params):
if not params["needs_padding"]:
return inpt
fill = v2._utils._get_fill(self.fill, type(inpt))
fill = v2._utils._convert_fill_arg(fill)
return v2.functional.pad(inpt, padding=params["padding"], fill=fill)
class CocoDetectionToVOCSegmentation(v2.Transform):
"""Turn samples from datasets.CocoDetection into the same format as VOCSegmentation.
This is achieved in two steps:
1. COCO differentiates between 91 categories while VOC only supports 21, including background for both. Fortunately,
the COCO categories are a superset of the VOC ones and thus can be mapped. Instances of the 70 categories not
present in VOC are dropped and replaced by background.
2. COCO only offers detection masks, i.e. a (N, H, W) bool-ish tensor, where the truthy values in each individual
mask denote the instance. However, a segmentation mask is a (H, W) integer tensor (typically torch.uint8), where
the value of each pixel denotes the category it belongs to. The detection masks are merged into one segmentation
mask while pixels that belong to multiple detection masks are marked as invalid.
"""
COCO_TO_VOC_LABEL_MAP = dict(
zip(
[0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72],
range(21),
)
)
INVALID_VALUE = 255
def _coco_detection_masks_to_voc_segmentation_mask(self, target):
if "masks" not in target:
return None
instance_masks, instance_labels_coco = target["masks"], target["labels"]
valid_labels_voc = [
(idx, label_voc)
for idx, label_coco in enumerate(instance_labels_coco.tolist())
if (label_voc := self.COCO_TO_VOC_LABEL_MAP.get(label_coco)) is not None
]
if not valid_labels_voc:
return None
valid_voc_category_idcs, instance_labels_voc = zip(*valid_labels_voc)
instance_masks = instance_masks[list(valid_voc_category_idcs)].to(torch.uint8)
instance_labels_voc = torch.tensor(instance_labels_voc, dtype=torch.uint8)
# Calling `.max()` on the stacked detection masks works fine to separate background from foreground as long as
# there is at most a single instance per pixel. Overlapping instances will be filtered out in the next step.
segmentation_mask, _ = (instance_masks * instance_labels_voc.reshape(-1, 1, 1)).max(dim=0)
segmentation_mask[instance_masks.sum(dim=0) > 1] = self.INVALID_VALUE
return segmentation_mask
def forward(self, image, target):
segmentation_mask = self._coco_detection_masks_to_voc_segmentation_mask(target)
if segmentation_mask is None:
segmentation_mask = torch.zeros(v2.functional.get_size(image), dtype=torch.uint8)
return image, datapoints.Mask(segmentation_mask)
|
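To make the mask-merging step in `_coco_detection_masks_to_voc_segmentation_mask` concrete, here is a hedged, standalone sketch on synthetic tensors (the labels, shapes and overlap are invented for illustration; this is not torchvision code):

import torch

INVALID_VALUE = 255

# Two 4x4 instance masks with VOC labels 3 and 5; they overlap at pixel (1, 1).
instance_masks = torch.zeros((2, 4, 4), dtype=torch.uint8)
instance_masks[0, :2, :2] = 1
instance_masks[1, 1:3, 1:3] = 1
instance_labels_voc = torch.tensor([3, 5], dtype=torch.uint8)

# Scale each mask by its label, collapse with max(), then invalidate overlaps.
segmentation_mask, _ = (instance_masks * instance_labels_voc.reshape(-1, 1, 1)).max(dim=0)
segmentation_mask[instance_masks.sum(dim=0) > 1] = INVALID_VALUE

print(segmentation_mask)  # pixel (1, 1) is covered by both instances and becomes 255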
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from .audio_clip.model import AudioCLIP
class AudioCLIPTextEncoder(Executor):
"""
Encode text data with the AudioCLIP model
"""
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
tokenizer_path: str = '.cache/bpe_simple_vocab_16e6.txt.gz',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
        :param model_path: path to the pre-trained AudioCLIP model.
        :param tokenizer_path: path to the BPE vocabulary file used by the text tokenizer.
        :param traversal_paths: default traversal path (used if not specified in
            request's parameters)
        :param batch_size: default batch size (used if not specified in
            request's parameters)
        :param device: device that the model is on (should be "cpu", "cuda" or "cuda:X",
            where X is the index of the GPU on the machine)
        :param download_model: whether to download the pre-trained model checkpoint
            when the executor is initialized
"""
super().__init__(*args, **kwargs)
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_full.sh'], cwd=root_path)
self.model = (
AudioCLIP(
pretrained=model_path,
bpe_path=tokenizer_path,
)
.to(device)
.eval()
)
self.traversal_paths = traversal_paths
self.batch_size = batch_size
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> None:
"""
Method to create embeddings for documents by encoding their text.
:param docs: A document array with documents to create embeddings for. Only the
documents that have the ``text`` attribute will get embeddings.
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
if not docs:
return
batch_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
require_attr='text',
)
with torch.inference_mode():
for batch in batch_generator:
embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])
embeddings = embeddings.cpu().numpy()
for idx, doc in enumerate(batch):
doc.embedding = embeddings[idx]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from .audio_clip.model import AudioCLIP
class AudioCLIPTextEncoder(Executor):
"""
Encode text data with the AudioCLIP model
"""
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
tokenizer_path: str = '.cache/bpe_simple_vocab_16e6.txt.gz',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
        :param model_path: path to the pre-trained AudioCLIP model.
        :param tokenizer_path: path to the BPE vocabulary file used by the text tokenizer.
:param traversal_paths: default traversal path (used if not specified in
request's parameters)
:param batch_size: default batch size (used if not specified in
request's parameters)
:param device: device that the model is on (should be "cpu", "cuda" or "cuda:X",
where X is the index of the GPU on the machine)
"""
super().__init__(*args, **kwargs)
self.model = (
AudioCLIP(
pretrained=model_path,
bpe_path=tokenizer_path,
)
.to(device)
.eval()
)
self.traversal_paths = traversal_paths
self.batch_size = batch_size
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> None:
"""
Method to create embeddings for documents by encoding their text.
:param docs: A document array with documents to create embeddings for. Only the
documents that have the ``text`` attribute will get embeddings.
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
if not docs:
return
batch_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
require_attr='text',
)
with torch.inference_mode():
for batch in batch_generator:
embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])
embeddings = embeddings.cpu().numpy()
for idx, doc in enumerate(batch):
doc.embedding = embeddings[idx]
|
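A hedged usage sketch for the encoder defined above; the checkpoint and vocabulary paths are placeholders that must point to downloaded files, and this is not an official example shipped with the executor.

from jina import Document, DocumentArray

encoder = AudioCLIPTextEncoder(
    model_path='.cache/AudioCLIP-Full-Training.pt',        # placeholder path
    tokenizer_path='.cache/bpe_simple_vocab_16e6.txt.gz',  # placeholder path
    device='cpu',
)
docs = DocumentArray([Document(text='a dog barking'), Document(text='rain on a window')])
encoder.encode(docs=docs, parameters={})
for doc in docs:
    print(doc.text, doc.embedding.shape)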
"""Markdown parser.
Contains parser for md files.
"""
import re
from pathlib import Path
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from typing import Any, Dict, List, Optional, Tuple
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MarkdownReader(BaseReader):
"""Markdown parser.
Extract text from markdown files.
Returns dictionary with keys as headers and values as the text between headers.
"""
def __init__(
self,
*args: Any,
remove_hyperlinks: bool = True,
remove_images: bool = True,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self._remove_hyperlinks = remove_hyperlinks
self._remove_images = remove_images
def markdown_to_tups(self, markdown_text: str) -> List[Tuple[Optional[str], str]]:
"""Convert a markdown file to a dictionary.
The keys are the headers and the values are the text under each header.
"""
markdown_tups: List[Tuple[Optional[str], str]] = []
lines = markdown_text.split("\n")
current_header = None
current_lines = []
in_code_block = False
for line in lines:
if line.startswith("```"):
# This is the end of a code block if we are already in it, and vice versa.
in_code_block = not in_code_block
header_match = re.match(r"^#+\s", line)
if not in_code_block and header_match:
# Upon first header, skip if current text chunk is empty
if current_header is not None or len(current_lines) > 0:
markdown_tups.append((current_header, "\n".join(current_lines)))
current_header = line
current_lines.clear()
else:
current_lines.append(line)
# Append final text chunk
markdown_tups.append((current_header, "\n".join(current_lines)))
# Postprocess the tuples before returning
return [
(
key if key is None else re.sub(r"#", "", key).strip(),
re.sub(r"<.*?>", "", value),
)
for key, value in markdown_tups
]
def remove_images(self, content: str) -> str:
"""Remove images in markdown content but keep the description."""
pattern = r""
return re.sub(pattern, r"\1", content)
def remove_hyperlinks(self, content: str) -> str:
"""Remove hyperlinks in markdown content."""
pattern = r"\[(.*?)\]\((.*?)\)"
return re.sub(pattern, r"\1", content)
def _init_parser(self) -> Dict:
"""Initialize the parser with the config."""
return {}
def parse_tups(
self,
filepath: Path,
errors: str = "ignore",
fs: Optional[AbstractFileSystem] = None,
) -> List[Tuple[Optional[str], str]]:
"""Parse file into tuples."""
fs = fs or LocalFileSystem()
with fs.open(filepath, encoding="utf-8") as f:
content = f.read().decode(encoding="utf-8")
if self._remove_hyperlinks:
content = self.remove_hyperlinks(content)
if self._remove_images:
content = self.remove_images(content)
return self.markdown_to_tups(content)
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file into string."""
tups = self.parse_tups(file, fs=fs)
results = []
# TODO: don't include headers right now
for header, value in tups:
if header is None:
results.append(Document(text=value, metadata=extra_info or {}))
else:
results.append(
Document(text=f"\n\n{header}\n{value}", metadata=extra_info or {})
)
return results
|
"""Markdown parser.
Contains parser for md files.
"""
import re
from pathlib import Path
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from typing import Any, Dict, List, Optional, Tuple
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MarkdownReader(BaseReader):
"""Markdown parser.
Extract text from markdown files.
Returns dictionary with keys as headers and values as the text between headers.
"""
def __init__(
self,
*args: Any,
remove_hyperlinks: bool = True,
remove_images: bool = True,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self._remove_hyperlinks = remove_hyperlinks
self._remove_images = remove_images
def markdown_to_tups(self, markdown_text: str) -> List[Tuple[Optional[str], str]]:
"""Convert a markdown file to a dictionary.
The keys are the headers and the values are the text under each header.
"""
markdown_tups: List[Tuple[Optional[str], str]] = []
lines = markdown_text.split("\n")
current_header = None
current_lines = []
in_code_block = False
for line in lines:
if line.startswith("```"):
# This is the end of a code block if we are already in it, and vice versa.
in_code_block = not in_code_block
header_match = re.match(r"^#+\s", line)
if not in_code_block and header_match:
# Upon first header, skip if current text chunk is empty
if current_header is not None or len(current_lines) > 0:
markdown_tups.append((current_header, "\n".join(current_lines)))
current_header = line
current_lines.clear()
else:
current_lines.append(line)
# Append final text chunk
markdown_tups.append((current_header, "\n".join(current_lines)))
# Postprocess the tuples before returning
return [
(
key if key is None else re.sub(r"#", "", key).strip(),
re.sub(r"<.*?>", "", value),
)
for key, value in markdown_tups
]
def remove_images(self, content: str) -> str:
"""Remove images in markdown content."""
pattern = r"!{1}\[\[(.*)\]\]"
return re.sub(pattern, "", content)
def remove_hyperlinks(self, content: str) -> str:
"""Remove hyperlinks in markdown content."""
pattern = r"\[(.*?)\]\((.*?)\)"
return re.sub(pattern, r"\1", content)
def _init_parser(self) -> Dict:
"""Initialize the parser with the config."""
return {}
def parse_tups(
self,
filepath: Path,
errors: str = "ignore",
fs: Optional[AbstractFileSystem] = None,
) -> List[Tuple[Optional[str], str]]:
"""Parse file into tuples."""
fs = fs or LocalFileSystem()
with fs.open(filepath, encoding="utf-8") as f:
content = f.read().decode(encoding="utf-8")
if self._remove_hyperlinks:
content = self.remove_hyperlinks(content)
if self._remove_images:
content = self.remove_images(content)
return self.markdown_to_tups(content)
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file into string."""
tups = self.parse_tups(file, fs=fs)
results = []
# TODO: don't include headers right now
for header, value in tups:
if header is None:
results.append(Document(text=value, metadata=extra_info or {}))
else:
results.append(
Document(text=f"\n\n{header}\n{value}", metadata=extra_info or {})
)
return results
|
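A quick, hedged illustration of the header-based splitting performed by `markdown_to_tups` above (the sample string is invented):

reader = MarkdownReader()
sample = "# Title\nIntro text\n\n## Section\nBody text\n"
for header, text in reader.markdown_to_tups(sample):
    print(repr(header), '->', repr(text))
# yields the cleaned headers 'Title' and 'Section', each paired with its body text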
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
'skin', 'face')
|
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
'skin', 'face')
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 75.00 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.22 (Threshold: 0.5974)
Precision with Cosine-Similarity: 54.18
Recall with Cosine-Similarity: 88.51
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.56
Accuracy with Dot-Product: 76.50 (Threshold: 23.4236)
F1 with Dot-Product: 67.00 (Threshold: 19.0095)
Precision with Dot-Product: 55.93
Recall with Dot-Product: 83.54
Average Precision with Dot-Product: 65.89
Matthews Correlation with Dot-Product: 48.88
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0041)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.1876)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.0263)
F1 with Manhattan-Distance: 48.60 (Threshold: -0.8532)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity: Active Dimensions: 61.2, Sparsity Ratio: 0.9980
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity: Active Dimensions: 63.1, Sparsity Ratio: 0.9979
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
import os
from typing import Any, Callable, Optional, Tuple, Union
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
stop_after_delay,
wait_exponential,
wait_random_exponential,
)
from tenacity.stop import stop_base
import openai
from openai.types.chat import ChatCompletionMessageToolCall
from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
DEFAULT_OPENAI_API_VERSION = ""
MISSING_API_KEY_ERROR_MESSAGE = """No API key found for OpenAI.
Please set either the OPENAI_API_KEY environment variable or \
openai.api_key prior to initialization.
API keys can be found or created at \
https://platform.openai.com/account/api-keys
"""
logger = logging.getLogger(__name__)
OpenAIToolCall = Union[ChatCompletionMessageToolCall, ChoiceDeltaToolCall]
def create_retry_decorator(
max_retries: int,
random_exponential: bool = False,
stop_after_delay_seconds: Optional[float] = None,
min_seconds: float = 4,
max_seconds: float = 10,
) -> Callable[[Any], Any]:
wait_strategy = (
wait_random_exponential(min=min_seconds, max=max_seconds)
if random_exponential
else wait_exponential(multiplier=1, min=min_seconds, max=max_seconds)
)
stop_strategy: stop_base = stop_after_attempt(max_retries)
if stop_after_delay_seconds is not None:
stop_strategy = stop_strategy | stop_after_delay(stop_after_delay_seconds)
return retry(
reraise=True,
stop=stop_strategy,
wait=wait_strategy,
retry=(
retry_if_exception_type(
(
openai.APIConnectionError,
openai.APITimeoutError,
openai.RateLimitError,
openai.InternalServerError,
)
)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def resolve_openai_credentials(
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
) -> Tuple[Optional[str], str, str]:
"""
"Resolve OpenAI credentials.
The order of precedence is:
1. param
2. env
3. openai module
4. default
"""
# resolve from param or env
api_key = get_from_param_or_env("api_key", api_key, "OPENAI_API_KEY", "")
api_base = get_from_param_or_env("api_base", api_base, "OPENAI_API_BASE", "")
api_version = get_from_param_or_env(
"api_version", api_version, "OPENAI_API_VERSION", ""
)
# resolve from openai module or default
final_api_key = api_key or openai.api_key or ""
final_api_base = api_base or openai.base_url or DEFAULT_OPENAI_API_BASE
final_api_version = api_version or openai.api_version or DEFAULT_OPENAI_API_VERSION
return final_api_key, str(final_api_base), final_api_version
def validate_openai_api_key(api_key: Optional[str] = None) -> None:
openai_api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
if not openai_api_key:
raise ValueError(MISSING_API_KEY_ERROR_MESSAGE)
|
import logging
import os
from typing import Any, Callable, Optional, Tuple, Union
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
stop_after_delay,
wait_exponential,
wait_random_exponential,
)
from tenacity.stop import stop_base
import openai
from openai.types.chat import ChatCompletionMessageToolCall
from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
DEFAULT_OPENAI_API_VERSION = ""
MISSING_API_KEY_ERROR_MESSAGE = """No API key found for OpenAI.
Please set either the OPENAI_API_KEY environment variable or \
openai.api_key prior to initialization.
API keys can be found or created at \
https://platform.openai.com/account/api-keys
"""
logger = logging.getLogger(__name__)
OpenAIToolCall = Union[ChatCompletionMessageToolCall, ChoiceDeltaToolCall]
def create_retry_decorator(
max_retries: int,
random_exponential: bool = False,
stop_after_delay_seconds: Optional[float] = None,
min_seconds: float = 4,
max_seconds: float = 10,
) -> Callable[[Any], Any]:
wait_strategy = (
wait_random_exponential(min=min_seconds, max=max_seconds)
if random_exponential
else wait_exponential(multiplier=1, min=min_seconds, max=max_seconds)
)
stop_strategy: stop_base = stop_after_attempt(max_retries)
if stop_after_delay_seconds is not None:
stop_strategy = stop_strategy | stop_after_delay(stop_after_delay_seconds)
return retry(
reraise=True,
stop=stop_strategy,
wait=wait_strategy,
retry=(
retry_if_exception_type(
(
openai.APIConnectionError,
openai.APITimeoutError,
openai.RateLimitError,
openai.InternalServerError,
)
)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def resolve_openai_credentials(
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
) -> Tuple[Optional[str], str, str]:
""" "Resolve OpenAI credentials.
The order of precedence is:
1. param
2. env
3. openai module
4. default
"""
# resolve from param or env
api_key = get_from_param_or_env("api_key", api_key, "OPENAI_API_KEY", "")
api_base = get_from_param_or_env("api_base", api_base, "OPENAI_API_BASE", "")
api_version = get_from_param_or_env(
"api_version", api_version, "OPENAI_API_VERSION", ""
)
# resolve from openai module or default
final_api_key = api_key or openai.api_key or ""
final_api_base = api_base or openai.base_url or DEFAULT_OPENAI_API_BASE
final_api_version = api_version or openai.api_version or DEFAULT_OPENAI_API_VERSION
return final_api_key, str(final_api_base), final_api_version
def validate_openai_api_key(api_key: Optional[str] = None) -> None:
openai_api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
if not openai_api_key:
raise ValueError(MISSING_API_KEY_ERROR_MESSAGE)
|
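A hedged sketch of the precedence described in the `resolve_openai_credentials` docstring above; the key values are obviously fake and no request is made.

import os

os.environ['OPENAI_API_KEY'] = 'sk-from-env'

# Assuming OPENAI_API_BASE and openai.base_url are not set, the default base URL is used.
print(resolve_openai_credentials())
# ('sk-from-env', 'https://api.openai.com/v1', '') -- env var is used

print(resolve_openai_credentials(api_key='sk-from-param'))
# ('sk-from-param', 'https://api.openai.com/v1', '') -- explicit param wins over env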
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict
import pytest
import numpy as np
from jina import DocumentArray, Document
from ...torch_encoder import ImageTorchEncoder
MODELS_TO_TEST = [
'mobilenet_v2',
'squeezenet1_0',
'alexnet',
'vgg11',
'densenet121',
'mnasnet0_5',
]
@pytest.mark.parametrize(
'model_name', MODELS_TO_TEST
)
def test_load_torch_models(model_name: str, test_images: Dict[str, np.array]):
encoder = ImageTorchEncoder(model_name=model_name)
docs = DocumentArray([Document(blob=img_arr) for img_arr in test_images.values()])
encoder.encode(
docs=docs,
parameters={}
)
for doc in docs:
assert doc.embedding is not None
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict
import pytest
import numpy as np
from jina import DocumentArray, Document
try:
from torch_encoder import ImageTorchEncoder
except:
from jinahub.image.encoder.torch_encoder import ImageTorchEncoder
MODELS_TO_TEST = [
'mobilenet_v2',
'squeezenet1_0',
'alexnet',
'vgg11',
'densenet121',
'mnasnet0_5',
]
@pytest.mark.parametrize(
'model_name', MODELS_TO_TEST
)
def test_load_torch_models(model_name: str, test_images: Dict[str, np.array]):
encoder = ImageTorchEncoder(model_name=model_name)
docs = DocumentArray([Document(blob=img_arr) for img_arr in test_images.values()])
encoder.encode(
docs=docs,
parameters={}
)
for doc in docs:
assert doc.embedding is not None
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(GridRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(GridRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestSingleStageDetector(TestCase):
@parameterized.expand([
'retinanet/retinanet_r18_fpn_1x_coco.py',
'centernet/centernet_resnet18_140e_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
assert detector.device.type == 'cpu'
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestSingleStageDetector(TestCase):
@parameterized.expand(['retinanet/retinanet_r18_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
assert detector.device.type == 'cpu'
@parameterized.expand([('retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
assert len(out) == 2
@parameterized.expand([('retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .match_costs import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
DiceCost, FocalLossCost, IoUCost)
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, corner2bbox, distance2bbox,
find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes',
'bbox2corner', 'corner2bbox', 'bbox_project', 'BBoxL1Cost',
'FocalLossCost', 'ClassificationCost', 'IoUCost', 'DiceCost',
'CrossEntropyLossCost'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, corner2bbox, distance2bbox,
find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes',
'bbox2corner', 'corner2bbox', 'bbox_project'
]
|
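As a standalone illustration of what one of the exported helpers computes, here is a hedged sketch of a `bbox_cxcywh_to_xyxy`-style conversion (this is not the mmdet implementation):

import torch


def cxcywh_to_xyxy(bbox: torch.Tensor) -> torch.Tensor:
    # (cx, cy, w, h) -> (x1, y1, x2, y2) with x1 = cx - w/2, y1 = cy - h/2, etc.
    cx, cy, w, h = bbox.unbind(-1)
    return torch.stack((cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2), dim=-1)


boxes = torch.tensor([[50.0, 50.0, 20.0, 10.0]])
print(cxcywh_to_xyxy(boxes))  # tensor([[40., 45., 60., 55.]])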
from io import BytesIO
from typing import TYPE_CHECKING, Any, List, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of:
- a [`VideoNdArray`][docarray.typing.VideoNdArray] (`VideoLoadResult.video`)
- an [`AudioNdArray`][docarray.typing.AudioNdArray] (`VideoLoadResult.audio`)
- an [`NdArray`][docarray.typing.NdArray] containing the key frame indices (`VideoLoadResult.key_frame_indices`).
---
```python
from docarray import BaseDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, VideoNdArray)
assert isinstance(audio, AudioNdArray)
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: a `VideoLoadResult` instance with video, audio and keyframe indices
"""
if TYPE_CHECKING:
import av
else:
av = import_library('av')
with av.open(BytesIO(self), **kwargs) as container:
audio_frames: List[np.ndarray] = []
video_frames: List[np.ndarray] = []
keyframe_indices: List[int] = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
video_frames.append(frame.to_ndarray(format='rgb24'))
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of:
- a [`VideoNdArray`][docarray.typing.VideoNdArray] (`VideoLoadResult.video`)
- an [`AudioNdArray`][docarray.typing.AudioNdArray] (`VideoLoadResult.audio`)
- an [`NdArray`][docarray.typing.NdArray] containing the key frame indices (`VideoLoadResult.key_frame_indices`).
---
```python
from docarray import BaseDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, VideoNdArray)
assert isinstance(audio, AudioNdArray)
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: a `VideoLoadResult` instance with video, audio and keyframe indices
"""
if TYPE_CHECKING:
import av
else:
av = import_library('av')
with av.open(BytesIO(self), **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp_optimizer_wrapper import AmpOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
from .zero_optimizer import ZeroRedundancyOptimizer
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'OptimWrapperDict', 'ZeroRedundancyOptimizer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp_optimizer_wrapper import AmpOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'OptimWrapperDict'
]
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import LuminaNextDiT2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class LuminaNextDiT2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = LuminaNextDiT2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
"""
Args:
None
Returns:
Dict: Dictionary of dummy input tensors
"""
batch_size = 2 # N
num_channels = 4 # C
height = width = 16 # H, W
embedding_dim = 32 # D
sequence_length = 16 # L
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.rand(size=(batch_size,)).to(torch_device)
encoder_mask = torch.randn(size=(batch_size, sequence_length)).to(torch_device)
image_rotary_emb = torch.randn((384, 384, 4)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_mask": encoder_mask,
"image_rotary_emb": image_rotary_emb,
"cross_attention_kwargs": {},
}
@property
def input_shape(self):
"""
Args:
None
Returns:
Tuple: (int, int, int)
"""
return (4, 16, 16)
@property
def output_shape(self):
"""
Args:
None
Returns:
Tuple: (int, int, int)
"""
return (4, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
"""
Args:
None
Returns:
Tuple: (Dict, Dict)
"""
init_dict = {
"sample_size": 16,
"patch_size": 2,
"in_channels": 4,
"hidden_size": 24,
"num_layers": 2,
"num_attention_heads": 3,
"num_kv_heads": 1,
"multiple_of": 16,
"ffn_dim_multiplier": None,
"norm_eps": 1e-5,
"learn_sigma": False,
"qk_norm": True,
"cross_attention_dim": 32,
"scaling_factor": 1.0,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import LuminaNextDiT2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class LuminaNextDiT2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = LuminaNextDiT2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
"""
Args:
None
Returns:
Dict: Dictionary of dummy input tensors
"""
batch_size = 2 # N
num_channels = 4 # C
height = width = 16 # H, W
embedding_dim = 32 # D
sequence_length = 16 # L
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.rand(size=(batch_size,)).to(torch_device)
encoder_mask = torch.randn(size=(batch_size, sequence_length)).to(torch_device)
image_rotary_emb = torch.randn((384, 384, 4)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_mask": encoder_mask,
"image_rotary_emb": image_rotary_emb,
"cross_attention_kwargs": {},
}
@property
def input_shape(self):
"""
Args:
None
Returns:
Tuple: (int, int, int)
"""
return (4, 16, 16)
@property
def output_shape(self):
"""
Args:
None
Returns:
Tuple: (int, int, int)
"""
return (4, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
"""
Args:
None
Returns:
Tuple: (Dict, Dict)
"""
init_dict = {
"sample_size": 16,
"patch_size": 2,
"in_channels": 4,
"hidden_size": 24,
"num_layers": 2,
"num_attention_heads": 3,
"num_kv_heads": 1,
"multiple_of": 16,
"ffn_dim_multiplier": None,
"norm_eps": 1e-5,
"learn_sigma": False,
"qk_norm": True,
"cross_attention_dim": 32,
"scaling_factor": 1.0,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
|
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super().__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
self.built = True
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
base_config.pop("activity_regularizer", None)
config = {"l1": self.l1, "l2": self.l2}
return {**base_config, **config}
|
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super().__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
base_config.pop("activity_regularizer", None)
config = {"l1": self.l1, "l2": self.l2}
return {**base_config, **config}
|
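A hedged sketch of using the layer above inside a tiny model; the shapes and regularization factors are illustrative only. The layer passes its input through unchanged while the L1/L2 penalty on that activity is added to the model's loss during training.

import numpy as np
import keras

model = keras.Sequential(
    [
        keras.Input(shape=(4,)),
        keras.layers.Dense(8, activation="relu"),
        keras.layers.ActivityRegularization(l1=1e-4, l2=1e-4),
        keras.layers.Dense(1),
    ]
)
model.compile(optimizer="adam", loss="mse")

x = np.random.rand(16, 4).astype("float32")
y = np.random.rand(16, 1).astype("float32")
model.fit(x, y, epochs=1, verbose=0)  # the activity penalty is added to the MSE loss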
from typing import List, Union
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
from typing import Union, List
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
:param guid
id for the example
:param texts
the texts for the example.
:param label
the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
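A short, hedged sketch of constructing a labelled pair with the class above (the sentences and label are invented):

example = InputExample(
    guid="train-0",
    texts=["A man is eating food.", "A man is eating a meal."],
    label=1.0,
)
print(example)
# <InputExample> label: 1.0, texts: A man is eating food.; A man is eating a meal.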